mirror of https://github.com/rust-lang/rust.git
Auto merge of #121665 - erikdesjardins:ptradd, r=nikic

Always generate GEP i8 / ptradd for struct offsets

This implements #98615, and goes a bit further to remove `struct_gep` entirely.

Upstream LLVM is in the beginning stages of [migrating to `ptradd`](https://discourse.llvm.org/t/rfc-replacing-getelementptr-with-ptradd/68699). LLVM 19 will [canonicalize](https://github.com/llvm/llvm-project/pull/68882) all constant-offset GEPs to i8, which has roughly the same effect as this change.

Fixes #121719.

Split out from #121577.

r? `@nikic`
This commit is contained in commit 70aa0b86c0.
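For a concrete sense of what the change does to the emitted IR, here is an illustrative sketch (the `Pair` type and the IR lines in the comments are hypothetical, not output from this commit): a field access that previously lowered to a struct-typed GEP now lowers to a constant byte-offset GEP on `i8`.

    #[repr(C)]
    pub struct Pair {
        a: u32, // offset 0
        b: u32, // offset 4
    }

    pub fn get_b(p: &Pair) -> u32 {
        // Before: %b = getelementptr inbounds %Pair, ptr %p, i32 0, i32 1
        // After:  %b = getelementptr inbounds i8, ptr %p, i64 4
        p.b
    }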
@@ -834,10 +834,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         }
         else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
            let b_offset = a.size(self).align_to(b.align(self).abi);
-            let pair_type = place.layout.gcc_type(self);

            let mut load = |i, scalar: &abi::Scalar, align| {
-                let llptr = self.struct_gep(pair_type, place.llval, i as u64);
+                let llptr = if i == 0 {
+                    place.llval
+                } else {
+                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                };
                let llty = place.layout.scalar_pair_element_gcc_type(self, i);
                let load = self.load(llty, llptr, align);
                scalar_load_metadata(self, load, scalar);
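The `b_offset` used above is the usual align-to computation: the second element of a scalar pair sits at the first element's size rounded up to the second element's alignment. A standalone sketch of the arithmetic (not compiler code):

    fn align_to(size: u64, align: u64) -> u64 {
        // Round `size` up to the next multiple of `align` (a power of two).
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        // Pair (i8, i32): first element has size 1, second has align 4 => offset 4.
        assert_eq!(align_to(1, 4), 4);
        // Pair (i64, i32): size 8, align 4 => offset 8 (already aligned).
        assert_eq!(align_to(8, 4), 8);
    }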
@@ -971,33 +974,6 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
        result.get_address(None)
    }

-    fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
-        // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
-        assert_eq!(idx as usize as u64, idx);
-        let value = ptr.dereference(None).to_rvalue();
-
-        if value_type.dyncast_array().is_some() {
-            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
-            let element = self.context.new_array_access(None, value, index);
-            element.get_address(None)
-        }
-        else if let Some(vector_type) = value_type.dyncast_vector() {
-            let array_type = vector_type.get_element_type().make_pointer();
-            let array = self.bitcast(ptr, array_type);
-            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
-            let element = self.context.new_array_access(None, array, index);
-            element.get_address(None)
-        }
-        else if let Some(struct_type) = value_type.is_struct() {
-            // NOTE: due to opaque pointers now being used, we need to bitcast here.
-            let ptr = self.bitcast_if_needed(ptr, value_type.make_pointer());
-            ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
-        }
-        else {
-            panic!("Unexpected type {:?}", value_type);
-        }
-    }
-
    /* Casts */
    fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
        // TODO(antoyo): check that it indeed truncate the value.
@@ -151,7 +151,6 @@ pub trait LayoutGccExt<'tcx> {
    fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
    fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc>;
-    fn gcc_field_index(&self, index: usize) -> u64;
    fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
 }

@@ -306,24 +305,6 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
        self.scalar_gcc_type_at(cx, scalar, offset)
    }

-    fn gcc_field_index(&self, index: usize) -> u64 {
-        match self.abi {
-            Abi::Scalar(_) | Abi::ScalarPair(..) => {
-                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
-            }
-            _ => {}
-        }
-        match self.fields {
-            FieldsShape::Primitive | FieldsShape::Union(_) => {
-                bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
-            }
-
-            FieldsShape::Array { .. } => index as u64,
-
-            FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
-        }
-    }
-
    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
            return pointee;
@@ -353,10 +334,6 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
        layout.is_gcc_scalar_pair()
    }

-    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
-        layout.gcc_field_index(index)
-    }
-
    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, _immediate: bool) -> Type<'gcc> {
        layout.scalar_pair_element_gcc_type(self, index)
    }
@@ -603,11 +603,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
            let llptr = if i == 0 {
                place.llval
            } else {
-                self.inbounds_gep(
-                    self.type_i8(),
-                    place.llval,
-                    &[self.const_usize(b_offset.bytes())],
-                )
+                self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
            };
            let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
            let load = self.load(llty, llptr, align);
@@ -778,11 +774,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        }
    }

-    fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value {
-        assert_eq!(idx as c_uint as u64, idx);
-        unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) }
-    }
-
    /* Casts */
    fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
@@ -1306,13 +1306,6 @@ extern "C" {
        NumIndices: c_uint,
        Name: *const c_char,
    ) -> &'a Value;
-    pub fn LLVMBuildStructGEP2<'a>(
-        B: &Builder<'a>,
-        Ty: &'a Type,
-        Pointer: &'a Value,
-        Idx: c_uint,
-        Name: *const c_char,
-    ) -> &'a Value;

    // Casts
    pub fn LLVMBuildTrunc<'a>(
@@ -261,9 +261,6 @@ impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
        layout.is_llvm_scalar_pair()
    }
-    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
-        layout.llvm_field_index(self, index)
-    }
    fn scalar_pair_element_backend_type(
        &self,
        layout: TyAndLayout<'tcx>,
@@ -174,7 +174,6 @@ pub trait LayoutLlvmExt<'tcx> {
        index: usize,
        immediate: bool,
    ) -> &'a Type;
-    fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64;
    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>;
 }

@@ -326,42 +325,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
        self.scalar_llvm_type_at(cx, scalar)
    }

-    fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
-        match self.abi {
-            Abi::Scalar(_) | Abi::ScalarPair(..) => {
-                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
-            }
-            _ => {}
-        }
-
-        match self.fields {
-            FieldsShape::Primitive | FieldsShape::Union(_) => {
-                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
-            }
-
-            FieldsShape::Array { .. } => index as u64,
-
-            FieldsShape::Arbitrary { .. } => {
-                let variant_index = match self.variants {
-                    Variants::Single { index } => Some(index),
-                    _ => None,
-                };
-
-                // Look up llvm field if indexes do not match memory order due to padding. If
-                // `field_remapping` is `None` no padding was used and the llvm field index
-                // matches the memory index.
-                match cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
-                    Some(TypeLowering { field_remapping: Some(ref remap), .. }) => {
-                        remap[index] as u64
-                    }
-                    Some(_) => self.fields.memory_index(index) as u64,
-                    None => {
-                        bug!("TyAndLayout::llvm_field_index({:?}): type info not found", self)
-                    }
-                }
-            }
-        }
-    }

    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> {
        debug_assert!(self.is_sized());
@@ -44,12 +44,12 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(

    let aligned_size = size.align_to(slot_size).bytes() as i32;
    let full_direct_size = bx.cx().const_i32(aligned_size);
-    let next = bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]);
+    let next = bx.inbounds_ptradd(addr, full_direct_size);
    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);

    if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
-        let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
+        let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
        (adjusted, addr_align)
    } else {
        (addr, addr_align)
@@ -89,11 +89,31 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
 ) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
    // Implementation of the AAPCS64 calling convention for va_args see
    // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
+    //
+    // typedef struct va_list {
+    //     void * stack; // next stack param
+    //     void * gr_top; // end of GP arg reg save area
+    //     void * vr_top; // end of FP/SIMD arg reg save area
+    //     int gr_offs; // offset from gr_top to next GP register arg
+    //     int vr_offs; // offset from vr_top to next FP/SIMD register arg
+    // } va_list;
    let va_list_addr = list.immediate();
-    let va_list_layout = list.deref(bx.cx).layout;
-    let va_list_ty = va_list_layout.llvm_type(bx);
+
+    // There is no padding between fields since `void*` is size=8 align=8, `int` is size=4 align=4.
+    // See https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
+    // Table 1, Byte size and byte alignment of fundamental data types
+    // Table 3, Mapping of C & C++ built-in data types
+    let ptr_offset = 8;
+    let i32_offset = 4;
+    let gr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(ptr_offset));
+    let vr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * ptr_offset));
+    let gr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset));
+    let vr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset + i32_offset));
+
    let layout = bx.cx.layout_of(target_ty);

    let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
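The hard-coded offsets (8, 16, 24, 28) follow directly from the `va_list` layout in the comment above: three 8-byte pointers followed by two 4-byte ints, with no padding. One way to sanity-check them is to mirror the C struct in Rust; the struct below is purely illustrative, not anything the compiler defines, and the offsets hold on any 64-bit target (`core::mem::offset_of!` requires Rust 1.77+):

    use core::ffi::c_void;
    use core::mem::offset_of;

    #[repr(C)]
    struct VaListAapcs64 {
        stack: *mut c_void,  // next stack param
        gr_top: *mut c_void, // end of GP arg reg save area
        vr_top: *mut c_void, // end of FP/SIMD arg reg save area
        gr_offs: i32,        // offset from gr_top to next GP register arg
        vr_offs: i32,        // offset from vr_top to next FP/SIMD register arg
    }

    fn main() {
        assert_eq!(offset_of!(VaListAapcs64, gr_top), 8);
        assert_eq!(offset_of!(VaListAapcs64, vr_top), 16);
        assert_eq!(offset_of!(VaListAapcs64, gr_offs), 24);
        assert_eq!(offset_of!(VaListAapcs64, vr_offs), 28);
    }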
@@ -104,16 +124,12 @@
    let offset_align = Align::from_bytes(4).unwrap();

    let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
-    let (reg_off, reg_top_index, slot_size) = if gr_type {
-        let gr_offs =
-            bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
+    let (reg_off, reg_top, slot_size) = if gr_type {
        let nreg = (layout.size.bytes() + 7) / 8;
-        (gr_offs, va_list_layout.llvm_field_index(bx.cx, 1), nreg * 8)
+        (gr_offs, gr_top, nreg * 8)
    } else {
-        let vr_off =
-            bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 4));
        let nreg = (layout.size.bytes() + 15) / 16;
-        (vr_off, va_list_layout.llvm_field_index(bx.cx, 2), nreg * 16)
+        (vr_offs, vr_top, nreg * 16)
    };

    // if the offset >= 0 then the value will be on the stack
@@ -141,15 +157,14 @@

    bx.switch_to_block(in_reg);
    let top_type = bx.type_ptr();
-    let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
-    let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
+    let top = bx.load(top_type, reg_top, dl.pointer_align.abi);

    // reg_value = *(@top + reg_off_v);
-    let mut reg_addr = bx.gep(bx.type_i8(), top, &[reg_off_v]);
+    let mut reg_addr = bx.ptradd(top, reg_off_v);
    if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
        // On big-endian systems the value is right-aligned in its slot.
        let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
-        reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
+        reg_addr = bx.ptradd(reg_addr, offset);
    }
    let reg_type = layout.llvm_type(bx);
    let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
@@ -173,11 +188,29 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
 ) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
    // Implementation of the s390x ELF ABI calling convention for va_args see
    // https://github.com/IBM/s390x-abi (chapter 1.2.4)
+    //
+    // typedef struct __va_list_tag {
+    //     long __gpr;
+    //     long __fpr;
+    //     void *__overflow_arg_area;
+    //     void *__reg_save_area;
+    // } va_list[1];
    let va_list_addr = list.immediate();
-    let va_list_layout = list.deref(bx.cx).layout;
-    let va_list_ty = va_list_layout.llvm_type(bx);
+
+    // There is no padding between fields since `long` and `void*` both have size=8 align=8.
+    // https://github.com/IBM/s390x-abi (Table 1.1.: Scalar types)
+    let i64_offset = 8;
+    let ptr_offset = 8;
+    let gpr = va_list_addr;
+    let fpr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(i64_offset));
+    let overflow_arg_area = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset));
+    let reg_save_area =
+        bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset + ptr_offset));
+
    let layout = bx.cx.layout_of(target_ty);

    let in_reg = bx.append_sibling_block("va_arg.in_reg");
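The same sanity check works for the s390x `__va_list_tag` offsets used above (8, 16, 24); again an illustrative mirror, valid on 64-bit hosts where C `long` maps to `i64`:

    use core::ffi::c_void;
    use core::mem::offset_of;

    #[repr(C)]
    struct VaListS390x {
        gpr: i64,                       // long __gpr
        fpr: i64,                       // long __fpr
        overflow_arg_area: *mut c_void, // void *__overflow_arg_area
        reg_save_area: *mut c_void,     // void *__reg_save_area
    }

    fn main() {
        assert_eq!(offset_of!(VaListS390x, fpr), 8);
        assert_eq!(offset_of!(VaListS390x, overflow_arg_area), 16);
        assert_eq!(offset_of!(VaListS390x, reg_save_area), 24);
    }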
@@ -192,15 +225,10 @@
    let padding = padded_size - unpadded_size;

    let gpr_type = indirect || !layout.is_single_fp_element(bx.cx);
-    let (max_regs, reg_count_field, reg_save_index, reg_padding) =
-        if gpr_type { (5, 0, 2, padding) } else { (4, 1, 16, 0) };
+    let (max_regs, reg_count, reg_save_index, reg_padding) =
+        if gpr_type { (5, gpr, 2, padding) } else { (4, fpr, 16, 0) };

    // Check whether the value was passed in a register or in memory.
-    let reg_count = bx.struct_gep(
-        va_list_ty,
-        va_list_addr,
-        va_list_layout.llvm_field_index(bx.cx, reg_count_field),
-    );
    let reg_count_v = bx.load(bx.type_i64(), reg_count, Align::from_bytes(8).unwrap());
    let use_regs = bx.icmp(IntPredicate::IntULT, reg_count_v, bx.const_u64(max_regs));
    bx.cond_br(use_regs, in_reg, in_mem);
@@ -209,12 +237,10 @@
    bx.switch_to_block(in_reg);

    // Work out the address of the value in the register save area.
-    let reg_ptr =
-        bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
-    let reg_ptr_v = bx.load(bx.type_ptr(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, dl.pointer_align.abi);
    let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
    let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
-    let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]);
+    let reg_addr = bx.ptradd(reg_ptr_v, reg_off);

    // Update the register count.
    let new_reg_count_v = bx.add(reg_count_v, bx.const_u64(1));
@@ -225,27 +251,23 @@
    bx.switch_to_block(in_mem);

    // Work out the address of the value in the argument overflow area.
-    let arg_ptr =
-        bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2));
-    let arg_ptr_v = bx.load(bx.type_ptr(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let arg_ptr_v =
+        bx.load(bx.type_ptr(), overflow_arg_area, bx.tcx().data_layout.pointer_align.abi);
    let arg_off = bx.const_u64(padding);
-    let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]);
+    let mem_addr = bx.ptradd(arg_ptr_v, arg_off);

    // Update the argument overflow area pointer.
    let arg_size = bx.cx().const_u64(padded_size);
-    let new_arg_ptr_v = bx.inbounds_gep(bx.type_i8(), arg_ptr_v, &[arg_size]);
-    bx.store(new_arg_ptr_v, arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size);
+    bx.store(new_arg_ptr_v, overflow_arg_area, dl.pointer_align.abi);
    bx.br(end);

    // Return the appropriate result.
    bx.switch_to_block(end);
    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
    let val_type = layout.llvm_type(bx);
-    let val_addr = if indirect {
-        bx.load(bx.cx.type_ptr(), val_addr, bx.tcx().data_layout.pointer_align.abi)
-    } else {
-        val_addr
-    };
+    let val_addr =
+        if indirect { bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) } else { val_addr };
    bx.load(val_type, val_addr, layout.align.abi)
 }
@@ -437,8 +437,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
        let align = dest.align;
        bx.store_with_flags(val, dest.llval, align, flags);

-        let llptr =
-            bx.inbounds_gep(bx.type_i8(), dest.llval, &[bx.const_usize(b_offset.bytes())]);
+        let llptr = bx.inbounds_ptradd(dest.llval, bx.const_usize(b_offset.bytes()));
        let val = bx.from_immediate(b);
        let align = dest.align.restrict_for_offset(b_offset);
        bx.store_with_flags(val, llptr, align, flags);
@@ -476,7 +475,7 @@
        let address = bx.ptrtoint(alloca, bx.type_isize());
        let neg_address = bx.neg(address);
        let offset = bx.and(neg_address, align_minus_1);
-        let dst = bx.inbounds_gep(bx.type_i8(), alloca, &[offset]);
+        let dst = bx.inbounds_ptradd(alloca, offset);
        bx.memcpy(dst, min_align, llptr, min_align, size, MemFlags::empty());

        // Store the allocated region and the extra to the indirect place.
@@ -9,7 +9,7 @@ use rustc_middle::mir;
 use rustc_middle::mir::tcx::PlaceTy;
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::{Abi, Align, FieldsShape, Int, Pointer, TagEncoding};
+use rustc_target::abi::{Align, FieldsShape, Int, Pointer, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};

 #[derive(Copy, Clone, Debug)]
@@ -102,34 +102,14 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
        // `simple` is called when we don't need to adjust the offset to
        // the dynamic alignment of the field.
        let mut simple = || {
-            let llval = match self.layout.abi {
-                _ if offset.bytes() == 0 => {
-                    // Unions and newtypes only use an offset of 0.
-                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
-                    self.llval
-                }
-                Abi::ScalarPair(..) => {
-                    // FIXME(nikic): Generate this for all ABIs.
-                    bx.inbounds_gep(bx.type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
-                }
-                Abi::Scalar(_) | Abi::Vector { .. } if field.is_zst() => {
-                    // ZST fields (even some that require alignment) are not included in Scalar,
-                    // ScalarPair, and Vector layouts, so manually offset the pointer.
-                    bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
-                }
-                Abi::Scalar(_) => {
-                    // All fields of Scalar layouts must have been handled by this point.
-                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
-                    bug!(
-                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
-                        field,
-                        self.layout
-                    );
-                }
-                _ => {
-                    let ty = bx.backend_type(self.layout);
-                    bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
-                }
+            let llval = if offset.bytes() == 0 {
+                self.llval
+            } else if field.is_zst() {
+                // FIXME(erikdesjardins): it should be fine to use inbounds for ZSTs too;
+                // keeping this logic for now to preserve previous behavior.
+                bx.ptradd(self.llval, bx.const_usize(offset.bytes()))
+            } else {
+                bx.inbounds_ptradd(self.llval, bx.const_usize(offset.bytes()))
            };
            PlaceRef {
                llval,
@@ -188,7 +168,8 @@
        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Adjust pointer.
-        let ptr = bx.gep(bx.cx().type_i8(), self.llval, &[offset]);
+        // FIXME(erikdesjardins): should be able to use inbounds here too.
+        let ptr = bx.ptradd(self.llval, offset);

        PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
    }
@@ -190,7 +190,12 @@ pub trait BuilderMethods<'a, 'tcx>:
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value;
-    fn struct_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: u64) -> Self::Value;
+    fn ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
+        self.gep(self.cx().type_i8(), ptr, &[offset])
+    }
+    fn inbounds_ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
+        self.inbounds_gep(self.cx().type_i8(), ptr, &[offset])
+    }

    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
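Note that `ptradd` and `inbounds_ptradd` are provided (default) trait methods defined in terms of `gep`/`inbounds_gep`, so every backend gets them without implementing anything new, while any backend remains free to override them. A minimal standalone sketch of that pattern (toy trait and names, purely illustrative):

    trait BuilderLike {
        type Value: Copy;

        // Required: the general GEP primitive (reduced here to an i8 element type).
        fn gep_i8(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value;

        // Provided: byte-offset addressing falls out of the primitive for free.
        fn ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
            self.gep_i8(ptr, offset)
        }
    }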
@@ -113,7 +113,6 @@ pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
    fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
    fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool;
    fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool;
-    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64;
    fn scalar_pair_element_backend_type(
        &self,
        layout: TyAndLayout<'tcx>,
@@ -26,7 +26,6 @@ pub enum Enum64 {
    B(i32),
 }
 // CHECK: %Enum64 = type { i32, [31 x i32] }
-// CHECK: %"Enum64::A" = type { [8 x i64], %Align64 }

 // CHECK-LABEL: @align64
 #[no_mangle]
@@ -94,9 +94,9 @@ pub fn store_struct(x: &mut Struct) {
 // CHECK-SAME: align 16 dereferenceable(32) %x
 // CHECK: [[TMP:%.*]] = alloca %Struct, align 16
 // CHECK: store i32 1, ptr [[TMP]], align 16
-// CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds %Struct, ptr [[TMP]], i32 0, i32 1
+// CHECK-NEXT: [[GEP1:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 4
 // CHECK-NEXT: store i32 2, ptr [[GEP1]], align 4
-// CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds %Struct, ptr [[TMP]], i32 0, i32 3
+// CHECK-NEXT: [[GEP2:%.*]] = getelementptr inbounds i8, ptr [[TMP]], i64 16
 // CHECK-NEXT: store i128 3, ptr [[GEP2]], align 16
 // CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 %x, ptr align 16 [[TMP]], i64 32, i1 false)
    *x = Struct { a: 1, b: 2, c: 3 };
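The updated CHECK lines encode byte offsets (4 and 16) rather than struct field indices. Assuming `Struct` is laid out like a C struct `{ i32, i32, i128 }` (an assumption; its definition is outside this hunk), the offsets work out as below, given that `i128` has 16-byte alignment on current rustc:

    use core::mem::{align_of, offset_of, size_of};

    #[repr(C)]
    struct StructLike {
        a: i32,  // offset 0
        b: i32,  // offset 4
        c: i128, // offset 16: 8 bytes of padding precede it
    }

    fn main() {
        assert_eq!(offset_of!(StructLike, b), 4);  // [[GEP1]] ... i64 4
        assert_eq!(offset_of!(StructLike, c), 16); // [[GEP2]] ... i64 16
        assert_eq!((size_of::<StructLike>(), align_of::<StructLike>()), (32, 16));
    }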
@@ -16,8 +16,8 @@ pub fn outer_function(x: S, y: S) -> usize {
 // when generating debuginfo.
 // CHECK-LABEL: @outer_function
 // CHECK: [[spill:%.*]] = alloca %"{closure@{{.*.rs}}:9:23: 9:25}"
-// CHECK-NOT: [[ptr_tmp:%.*]] = getelementptr inbounds %"{closure@{{.*.rs}}:9:23: 9:25}", ptr [[spill]]
+// CHECK-NOT: [[ptr_tmp:%.*]] = getelementptr inbounds i8, ptr [[spill]]
 // CHECK-NOT: [[load:%.*]] = load ptr, ptr
 // CHECK: call void @llvm.lifetime.start{{.*}}({{.*}}, ptr [[spill]])
-// CHECK: [[inner:%.*]] = getelementptr inbounds %"{{.*}}", ptr [[spill]]
+// CHECK: [[inner:%.*]] = getelementptr inbounds i8, ptr [[spill]]
 // CHECK: call void @llvm.memcpy{{.*}}(ptr {{align .*}} [[inner]], ptr {{align .*}} %x
@@ -0,0 +1,44 @@
+//! This test checks that match branches which all access a field
+//! at the same offset are merged together.
+//!
+//@ compile-flags: -O
+#![crate_type = "lib"]
+
+#[repr(C)]
+pub struct A {
+    x: f64,
+    y: u64,
+}
+#[repr(C)]
+pub struct B {
+    x: f64,
+    y: u32,
+}
+#[repr(C)]
+pub struct C {
+    x: f64,
+    y: u16,
+}
+#[repr(C)]
+pub struct D {
+    x: f64,
+    y: u8,
+}
+
+pub enum E {
+    A(A),
+    B(B),
+    C(C),
+    D(D),
+}
+
+// CHECK-LABEL: @match_on_e
+#[no_mangle]
+pub fn match_on_e(e: &E) -> &f64 {
+    // CHECK: start:
+    // CHECK-NEXT: getelementptr
+    // CHECK-NEXT: ret
+    match e {
+        E::A(A { x, .. }) | E::B(B { x, .. }) | E::C(C { x, .. }) | E::D(D { x, .. }) => x,
+    }
+}
@@ -22,7 +22,7 @@ pub fn scalar_layout(s: &(u64, ())) {
 // CHECK-LABEL: @scalarpair_layout
 #[no_mangle]
 pub fn scalarpair_layout(s: &(u64, u32, ())) {
-    // CHECK: getelementptr inbounds i8, {{.+}}, [[USIZE]] 12
+    // CHECK: getelementptr i8, {{.+}}, [[USIZE]] 12
    let x = &s.2;
    witness(&x); // keep variable in an alloca
 }
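The dropped `inbounds` here is expected: ZST fields now go through the plain `ptradd` path in `project_field` (see the FIXME in the place.rs hunk above), which emits a non-inbounds `getelementptr i8`. The offset 12 follows from the layout of `(u64, u32, ())`; a hedged mirror of the arithmetic (Rust tuple layout is unspecified, so `repr(C)` is used below only to make the offsets visible):

    use core::mem::offset_of;

    #[repr(C)]
    struct Mirror {
        a: u64, // offset 0
        b: u32, // offset 8
        c: (),  // offset 12: right after `b`; a ZST needs no padding
    }

    fn main() {
        assert_eq!(offset_of!(Mirror, c), 12);
    }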