Separate immediate and in-memory ScalarPair representation

Currently, we assume that ScalarPair is always represented using
a two-element struct, both as an immediate value and when stored
in memory.

This works fairly well, but runs into problems with
https://github.com/rust-lang/rust/pull/116672, where a ScalarPair
involving an i128 type can no longer be represented as a two-element
struct in memory. For example, the tuple `(i32, i128)` needs to be
represented in-memory as `{ i32, [3 x i32], i128 }` to satisfy its
alignment requirements. Using `{ i32, i128 }` instead would store
the second element at the wrong offset (prior to LLVM 18).
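
For intuition, here is a minimal sketch of the offset arithmetic in
plain Rust (not compiler code; `align_to` mirrors the layout helper
of the same name, and the numbers assume an x86-64-style target):

```rust
/// Round `offset` up to a multiple of `align` (both in bytes); this mirrors
/// the `Size::align_to` helper used by the layout code.
fn align_to(offset: u64, align: u64) -> u64 {
    offset.div_ceil(align) * align
}

fn main() {
    // Rust layout for (i32, i128) with 16-byte-aligned i128
    // (rust-lang/rust#116672): the i128 must land at offset 16.
    assert_eq!(align_to(4, 16), 16);

    // Pre-LLVM-18 x86 data layouts only align i128 to 8 bytes, so the
    // plain LLVM struct { i32, i128 } puts its second element at offset 8:
    assert_eq!(align_to(4, 8), 8);

    // Hence the explicit [3 x i32] filler (12 bytes) in the in-memory type,
    // which forces the i128 to offset 4 + 12 = 16 on any data layout.
}
```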

Resolve this issue by no longer requiring the immediate and
in-memory types for ScalarPair to be the same. The in-memory type
now looks the same as for normal struct types (including padding
filler and the like), while the immediate type stays a simple
two-element struct. This also means that booleans in an immediate
ScalarPair are now represented as i1 rather than i8, just as we
do everywhere else.
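
As a concrete illustration (hypothetical type strings printed from
plain Rust, not the rustc API), the two representations for the
`(i32, i128)` example above now diverge:

```rust
fn main() {
    // In-memory type: built like any normal struct, padding filler included.
    let in_memory = "{ i32, [3 x i32], i128 }";
    // Immediate type: just the two elements, since an immediate pair is
    // never stored to memory; a bool element would be i1 here, not i8.
    let immediate = "{ i32, i128 }";
    println!("in-memory: {in_memory}");
    println!("immediate: {immediate}");
}
```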

The core change here is to llvm_type (which now treats ScalarPair
as a normal struct) and immediate_llvm_type (which returns the
two-element struct that llvm_type used to produce). The rest is
fixing things up to no longer assume the two are the same. In
particular, places that used struct-geps to obtain pointers to the
ScalarPair elements now use byte-geps instead.
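
A small model of the byte-gep approach in plain Rust (the struct and
the hard-coded offset are illustrative, assuming 16-byte-aligned i128):

```rust
// Stand-in for the in-memory LLVM type { i32, [3 x i32], i128 }.
#[repr(C)]
struct InMemoryPair {
    a: i32,
    _pad: [i32; 3], // explicit padding filler
    b: i128,
}

fn main() {
    let v = InMemoryPair { a: 1, _pad: [0; 3], b: 2 };
    let base = (&v as *const InMemoryPair).cast::<u8>();
    // A byte-gep derives the second element's offset from the scalar layout
    // alone: align_to(size_of(a), align_of(b)) = align_to(4, 16) = 16.
    // That stays correct no matter how the in-memory struct is padded,
    // whereas a struct-gep to field index 1 would now hit the filler.
    let b_ptr = unsafe { base.add(16) }.cast::<i128>();
    assert_eq!(unsafe { b_ptr.read_unaligned() }, 2);
}
```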
Author: Nikita Popov
Date: 2023-12-15 12:14:39 +01:00
Parent: 1aa6aefdc9
Commit: c2fd26a115
15 changed files with 75 additions and 66 deletions


```diff
@@ -6,7 +6,7 @@ use crate::type_::Type;
 use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
-use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
@@ -253,7 +253,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 bx.lifetime_end(llscratch, scratch_size);
             }
         } else {
-            OperandValue::Immediate(val).store(bx, dst);
+            OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
         }
     }
```


```diff
@@ -558,10 +558,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
         } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
             let b_offset = a.size(self).align_to(b.align(self).abi);
-            let pair_ty = place.layout.llvm_type(self);

             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
-                let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
+                let llptr = if i == 0 {
+                    place.llval
+                } else {
+                    self.inbounds_gep(
+                        self.type_i8(),
+                        place.llval,
+                        &[self.const_usize(b_offset.bytes())],
+                    )
+                };
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar, layout, offset);
```


```diff
@@ -179,7 +179,10 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 unsafe {
                     llvm::LLVMSetAlignment(load, align);
                 }
-                self.to_immediate(load, self.layout_of(tp_ty))
+                if !result.layout.is_zst() {
+                    self.store(load, result.llval, result.align);
+                }
+                return;
             }
             sym::volatile_store => {
                 let dst = args[0].deref(self.cx());
```


```diff
@@ -26,16 +26,8 @@ fn uncached_llvm_type<'a, 'tcx>(
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
-        Abi::ScalarPair(..) => {
-            return cx.type_struct(
-                &[
-                    layout.scalar_pair_element_llvm_type(cx, 0, false),
-                    layout.scalar_pair_element_llvm_type(cx, 1, false),
-                ],
-                false,
-            );
-        }
-        Abi::Uninhabited | Abi::Aggregate { .. } => {}
+        // Treat ScalarPair like a normal aggregate for the purposes of in-memory representation.
+        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
     }

     let name = match layout.ty.kind() {
@@ -275,11 +267,25 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }

     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        if let Abi::Scalar(scalar) = self.abi {
-            if scalar.is_bool() {
-                return cx.type_i1();
+        match self.abi {
+            Abi::Scalar(scalar) => {
+                if scalar.is_bool() {
+                    return cx.type_i1();
+                }
             }
-        }
+            Abi::ScalarPair(..) => {
+                // An immediate pair always contains just the two elements, without any padding
+                // filler, as it should never be stored to memory.
+                return cx.type_struct(
+                    &[
+                        self.scalar_pair_element_llvm_type(cx, 0, true),
+                        self.scalar_pair_element_llvm_type(cx, 1, true),
+                    ],
+                    false,
+                );
+            }
+            _ => {}
+        };
         self.llvm_type(cx)
     }
```


```diff
@@ -231,14 +231,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         bx: &mut Bx,
     ) -> V {
         if let OperandValue::Pair(a, b) = self.val {
-            let llty = bx.cx().backend_type(self.layout);
+            let llty = bx.cx().immediate_backend_type(self.layout);
             debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
             // Reconstruct the immediate aggregate.
             let mut llpair = bx.cx().const_poison(llty);
-            let imm_a = bx.from_immediate(a);
-            let imm_b = bx.from_immediate(b);
-            llpair = bx.insert_value(llpair, imm_a, 0);
-            llpair = bx.insert_value(llpair, imm_b, 1);
+            llpair = bx.insert_value(llpair, a, 0);
+            llpair = bx.insert_value(llpair, b, 1);
             llpair
         } else {
             self.immediate()
@@ -251,14 +249,12 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         llval: V,
         layout: TyAndLayout<'tcx>,
     ) -> Self {
-        let val = if let Abi::ScalarPair(a, b) = layout.abi {
+        let val = if let Abi::ScalarPair(..) = layout.abi {
             debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
             // Deconstruct the immediate aggregate.
             let a_llval = bx.extract_value(llval, 0);
-            let a_llval = bx.to_immediate_scalar(a_llval, a);
             let b_llval = bx.extract_value(llval, 1);
-            let b_llval = bx.to_immediate_scalar(b_llval, b);
             OperandValue::Pair(a_llval, b_llval)
         } else {
             OperandValue::Immediate(llval)
@@ -435,15 +431,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
             bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
         };
-        let ty = bx.backend_type(dest.layout);
         let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);

-        let llptr = bx.struct_gep(ty, dest.llval, 0);
         let val = bx.from_immediate(a);
         let align = dest.align;
-        bx.store_with_flags(val, llptr, align, flags);
+        bx.store_with_flags(val, dest.llval, align, flags);

-        let llptr = bx.struct_gep(ty, dest.llval, 1);
+        let llptr =
+            bx.inbounds_gep(bx.type_i8(), dest.llval, &[bx.const_usize(b_offset.bytes())]);
         let val = bx.from_immediate(b);
         let align = dest.align.restrict_for_offset(b_offset);
         bx.store_with_flags(val, llptr, align, flags);
```


```diff
@@ -112,8 +112,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
             {
                 // Offset matches second field.
-                let ty = bx.backend_type(self.layout);
-                bx.struct_gep(ty, self.llval, 1)
+                bx.inbounds_gep(bx.type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                 // ZST fields (even some that require alignment) are not included in Scalar,
```


```diff
@@ -106,21 +106,21 @@ pub struct ForceAlign16 {
 pub unsafe fn call_na1(x: NaturalAlign1) {
     // CHECK: start:

-    // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca { i8, i8 }, align 1
-    // m68k: call void @natural_align_1({{.*}}byval({ i8, i8 }) align 1{{.*}} [[ALLOCA]])
+    // m68k: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
+    // m68k: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}} [[ALLOCA]])

-    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca { i8, i8 }, align 1
-    // wasm: call void @natural_align_1({{.*}}byval({ i8, i8 }) align 1{{.*}} [[ALLOCA]])
+    // wasm: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 1
+    // wasm: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}} [[ALLOCA]])

     // x86_64-linux: call void @natural_align_1(i16
     // x86_64-windows: call void @natural_align_1(i16

-    // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca { i8, i8 }, align 4
-    // i686-linux: call void @natural_align_1({{.*}}byval({ i8, i8 }) align 4{{.*}} [[ALLOCA]])
+    // i686-linux: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
+    // i686-linux: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}} [[ALLOCA]])

-    // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca { i8, i8 }, align 4
-    // i686-windows: call void @natural_align_1({{.*}}byval({ i8, i8 }) align 4{{.*}} [[ALLOCA]])
+    // i686-windows: [[ALLOCA:%[a-z0-9+]]] = alloca %NaturalAlign1, align 4
+    // i686-windows: call void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}} [[ALLOCA]])
     natural_align_1(x);
 }
@@ -199,17 +199,17 @@ pub unsafe fn call_fa16(x: ForceAlign16) {
 }

 extern "C" {
-    // m68k: declare void @natural_align_1({{.*}}byval({ i8, i8 }) align 1{{.*}})
+    // m68k: declare void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}})

-    // wasm: declare void @natural_align_1({{.*}}byval({ i8, i8 }) align 1{{.*}})
+    // wasm: declare void @natural_align_1({{.*}}byval(%NaturalAlign1) align 1{{.*}})

     // x86_64-linux: declare void @natural_align_1(i16)
     // x86_64-windows: declare void @natural_align_1(i16)

-    // i686-linux: declare void @natural_align_1({{.*}}byval({ i8, i8 }) align 4{{.*}})
-    // i686-windows: declare void @natural_align_1({{.*}}byval({ i8, i8 }) align 4{{.*}})
+    // i686-linux: declare void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}})
+    // i686-windows: declare void @natural_align_1({{.*}}byval(%NaturalAlign1) align 4{{.*}})
     fn natural_align_1(x: NaturalAlign1);

     // m68k: declare void @natural_align_2({{.*}}byval(%NaturalAlign2) align 2{{.*}})
```


```diff
@@ -57,7 +57,7 @@ pub fn nested64(a: Align64, b: i32, c: i32, d: i8) -> Nested64 {
 // CHECK-LABEL: @enum4
 #[no_mangle]
 pub fn enum4(a: i32) -> Enum4 {
-    // CHECK: %e4 = alloca { i32, i32 }, align 4
+    // CHECK: %e4 = alloca %Enum4, align 4
     let e4 = Enum4::A(a);
     e4
 }
```


```diff
@@ -55,7 +55,7 @@ pub fn struct_call(x: S, f: fn(S) -> S) -> S {
     f(x)
 }

-// CHECK: { i8, i8 } @enum_(i1 zeroext %x.0, i8 %x.1)
+// CHECK: { i1, i8 } @enum_(i1 zeroext %x.0, i8 %x.1)
 #[no_mangle]
 pub fn enum_(x: Option<u8>) -> Option<u8> {
     x
@@ -64,6 +64,6 @@ pub fn enum_(x: Option<u8>) -> Option<u8> {
 // CHECK-LABEL: @enum_call
 #[no_mangle]
 pub fn enum_call(x: Option<u8>, f: fn(Option<u8>) -> Option<u8>) -> Option<u8> {
-    // CHECK: call { i8, i8 } %f(i1 zeroext %x.0, i8 %x.1)
+    // CHECK: call { i1, i8 } %f(i1 zeroext %x.0, i8 %x.1)
     f(x)
 }
```


```diff
@@ -275,7 +275,7 @@ pub fn enum_id_1(x: Option<Result<u16, u16>>) -> Option<Result<u16, u16>> {
     x
 }

-// CHECK: { i8, i8 } @enum_id_2(i1 noundef zeroext %x.0, i8 %x.1)
+// CHECK: { i1, i8 } @enum_id_2(i1 noundef zeroext %x.0, i8 %x.1)
 #[no_mangle]
 pub fn enum_id_2(x: Option<u8>) -> Option<u8> {
     x
```


```diff
@@ -190,7 +190,7 @@ pub unsafe fn check_byte_from_bool(x: bool) -> u8 {
 // CHECK-LABEL: @check_to_pair(
 #[no_mangle]
 pub unsafe fn check_to_pair(x: u64) -> Option<i32> {
-    // CHECK: %_0 = alloca { i32, i32 }, align 4
+    // CHECK: %_0 = alloca %"core::option::Option<i32>", align 4
     // CHECK: store i64 %x, ptr %_0, align 4
     transmute(x)
 }
@@ -203,10 +203,10 @@ pub unsafe fn check_from_pair(x: Option<i32>) -> u64 {
     const { assert!(std::mem::align_of::<Option<i32>>() == 4) };

     // CHECK: %_0 = alloca i64, align 8
-    // CHECK: store i32 %x.0, ptr %0, align 8
-    // CHECK: store i32 %x.1, ptr %1, align 4
-    // CHECK: %2 = load i64, ptr %_0, align 8
-    // CHECK: ret i64 %2
+    // CHECK: store i32 %x.0, ptr %_0, align 8
+    // CHECK: store i32 %x.1, ptr %0, align 4
+    // CHECK: %[[R:.+]] = load i64, ptr %_0, align 8
+    // CHECK: ret i64 %[[R]]
     transmute(x)
 }
```


```diff
@@ -24,7 +24,7 @@ pub fn test() {
     let _s = S;
     // Check that the personality slot alloca gets a lifetime start in each cleanup block, not just
     // in the first one.
-    // CHECK: [[SLOT:%[0-9]+]] = alloca { ptr, i32 }
+    // CHECK: [[SLOT:%[0-9]+]] = alloca { ptr, i32, [1 x i32] }
     // CHECK-LABEL: cleanup:
     // CHECK: call void @llvm.lifetime.start.{{.*}}({{.*}})
     // CHECK-LABEL: cleanup1:
```


```diff
@@ -13,9 +13,8 @@ pub fn helper(_: usize) {
 pub fn ref_dst(s: &[u8]) {
     // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
     // directly to the alloca for "x"
-    // CHECK: [[X0:%[0-9]+]] = getelementptr inbounds { ptr, [[USIZE]] }, {{.*}} %x, i32 0, i32 0
-    // CHECK: store ptr %s.0, {{.*}} [[X0]]
-    // CHECK: [[X1:%[0-9]+]] = getelementptr inbounds { ptr, [[USIZE]] }, {{.*}} %x, i32 0, i32 1
+    // CHECK: store ptr %s.0, {{.*}} %x
+    // CHECK: [[X1:%[0-9]+]] = getelementptr inbounds i8, {{.*}} %x, {{i32 4|i64 8}}
     // CHECK: store [[USIZE]] %s.1, {{.*}} [[X1]]
     let x = &*s;
```


```diff
@@ -2,25 +2,25 @@

 #![crate_type = "lib"]

-// CHECK: define{{.*}}{ i8, i8 } @pair_bool_bool(i1 noundef zeroext %pair.0, i1 noundef zeroext %pair.1)
+// CHECK: define{{.*}}{ i1, i1 } @pair_bool_bool(i1 noundef zeroext %pair.0, i1 noundef zeroext %pair.1)
 #[no_mangle]
 pub fn pair_bool_bool(pair: (bool, bool)) -> (bool, bool) {
     pair
 }

-// CHECK: define{{.*}}{ i8, i32 } @pair_bool_i32(i1 noundef zeroext %pair.0, i32 noundef %pair.1)
+// CHECK: define{{.*}}{ i1, i32 } @pair_bool_i32(i1 noundef zeroext %pair.0, i32 noundef %pair.1)
 #[no_mangle]
 pub fn pair_bool_i32(pair: (bool, i32)) -> (bool, i32) {
     pair
 }

-// CHECK: define{{.*}}{ i32, i8 } @pair_i32_bool(i32 noundef %pair.0, i1 noundef zeroext %pair.1)
+// CHECK: define{{.*}}{ i32, i1 } @pair_i32_bool(i32 noundef %pair.0, i1 noundef zeroext %pair.1)
 #[no_mangle]
 pub fn pair_i32_bool(pair: (i32, bool)) -> (i32, bool) {
     pair
 }

-// CHECK: define{{.*}}{ i8, i8 } @pair_and_or(i1 noundef zeroext %_1.0, i1 noundef zeroext %_1.1)
+// CHECK: define{{.*}}{ i1, i1 } @pair_and_or(i1 noundef zeroext %_1.0, i1 noundef zeroext %_1.1)
 #[no_mangle]
 pub fn pair_and_or((a, b): (bool, bool)) -> (bool, bool) {
     // Make sure it can operate directly on the unpacked args
```


```diff
@@ -15,7 +15,7 @@
 // CHECK-LABEL: @slice_iter_next(
 #[no_mangle]
 pub fn slice_iter_next<'a>(it: &mut std::slice::Iter<'a, u32>) -> Option<&'a u32> {
-    // CHECK: %[[ENDP:.+]] = getelementptr{{.+}}ptr %it,{{.+}} 1
+    // CHECK: %[[ENDP:.+]] = getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}
     // CHECK: %[[END:.+]] = load ptr, ptr %[[ENDP]]
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
@@ -32,7 +32,7 @@ pub fn slice_iter_next<'a>(it: &mut std::slice::Iter<'a, u32>) -> Option<&'a u32>
 // CHECK-LABEL: @slice_iter_next_back(
 #[no_mangle]
 pub fn slice_iter_next_back<'a>(it: &mut std::slice::Iter<'a, u32>) -> Option<&'a u32> {
-    // CHECK: %[[ENDP:.+]] = getelementptr{{.+}}ptr %it,{{.+}} 1
+    // CHECK: %[[ENDP:.+]] = getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}
     // CHECK: %[[END:.+]] = load ptr, ptr %[[ENDP]]
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
@@ -84,7 +84,7 @@ pub fn slice_iter_mut_new(slice: &mut [u32]) -> std::slice::IterMut<'_, u32> {
 // CHECK-LABEL: @slice_iter_is_empty
 #[no_mangle]
 pub fn slice_iter_is_empty(it: &std::slice::Iter<'_, u32>) -> bool {
-    // CHECK: %[[ENDP:.+]] = getelementptr{{.+}}ptr %it,{{.+}} 1
+    // CHECK: %[[ENDP:.+]] = getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}
     // CHECK: %[[END:.+]] = load ptr, ptr %[[ENDP]]
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
@@ -100,7 +100,7 @@ pub fn slice_iter_is_empty(it: &std::slice::Iter<'_, u32>) -> bool {
 // CHECK-LABEL: @slice_iter_len
 #[no_mangle]
 pub fn slice_iter_len(it: &std::slice::Iter<'_, u32>) -> usize {
-    // CHECK: %[[ENDP:.+]] = getelementptr{{.+}}ptr %it,{{.+}} 1
+    // CHECK: %[[ENDP:.+]] = getelementptr inbounds i8, ptr %it, {{i32 4|i64 8}}
     // CHECK: %[[END:.+]] = load ptr, ptr %[[ENDP]]
     // CHECK-SAME: !nonnull
     // CHECK-SAME: !noundef
```