[fir] Add fircg.ext_embox conversion

Convert the fircg.ext_embox operation to the LLVM IR dialect.
A fircg.ext_embox is converted to a sequence of operations that
create, allocate if needed, and populate a descriptor.
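
For example (adapted from the tests added below), an embox of a dynamic
1-D array such as

  %0 = fircg.ext_embox %arg0(%c0) origin %c0[%c0, %c0, %c0]
      : (!fir.ref<!fir.array<?xi32>>, i64, i64, i64, i64, i64)
      -> !fir.box<!fir.array<?xi32>>

becomes a series of llvm.insertvalue operations that fill in the base
address, element size, rank, type code, and per-dimension
[lower bound, extent, stride] triples of the descriptor struct, followed
by an llvm.store into an llvm.alloca when the embox does not occur in a
global initializer.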

This patch is part of the upstreaming effort from the fir-dev branch.

Reviewed By: kiranchandramohan

Differential Revision: https://reviews.llvm.org/D114148

Co-authored-by: Eric Schweitz <eschweitz@nvidia.com>
Co-authored-by: Jean Perier <jperier@nvidia.com>
commit 1f55103263 (parent 0bf2c87785)
Valentin Clement 2021-12-03 11:44:47 +01:00
4 changed files with 487 additions and 3 deletions


@@ -168,6 +168,14 @@ inline mlir::Type unwrapRefType(mlir::Type t) {
return t;
}
/// If `t` conforms with a pass-by-reference type (box, ref, ptr, etc.) then
/// return the element type of `t`. Otherwise, return `t`.
inline mlir::Type unwrapPassByRefType(mlir::Type t) {
if (auto eleTy = dyn_cast_ptrOrBoxEleTy(t))
return eleTy;
return t;
}
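// For example:
//   unwrapPassByRefType(!fir.ref<!fir.array<?xi32>>) --> !fir.array<?xi32>
//   unwrapPassByRefType(!fir.box<i32>)               --> i32
//   unwrapPassByRefType(i32)                         --> i32 (not by-ref)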
#ifndef NDEBUG
// !fir.ptr<X> and !fir.heap<X> where X is !fir.ptr, !fir.heap, or !fir.ref
// are undefined and disallowed.


@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "CGOps.h"
#include "PassDetail.h"
#include "flang/ISO_Fortran_binding.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
@@ -1454,7 +1455,29 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
insertBaseAddress(mlir::ConversionPatternRewriter &rewriter,
mlir::Location loc, mlir::Value dest,
mlir::Value base) const {
- return insertField(rewriter, loc, dest, {0}, base, /*bitCast=*/true);
+ return insertField(rewriter, loc, dest, {kAddrPosInBox}, base,
+                    /*bitCast=*/true);
}
inline mlir::Value insertLowerBound(mlir::ConversionPatternRewriter &rewriter,
mlir::Location loc, mlir::Value dest,
unsigned dim, mlir::Value lb) const {
return insertField(rewriter, loc, dest,
{kDimsPosInBox, dim, kDimLowerBoundPos}, lb);
}
inline mlir::Value insertExtent(mlir::ConversionPatternRewriter &rewriter,
mlir::Location loc, mlir::Value dest,
unsigned dim, mlir::Value extent) const {
return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimExtentPos},
extent);
}
inline mlir::Value insertStride(mlir::ConversionPatternRewriter &rewriter,
mlir::Location loc, mlir::Value dest,
unsigned dim, mlir::Value stride) const {
return insertField(rewriter, loc, dest, {kDimsPosInBox, dim, kDimStridePos},
stride);
}
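// For example, insertExtent(rewriter, loc, dest, /*dim=*/1, e) writes `e`
// at dest[kDimsPosInBox][1][kDimExtentPos], i.e. the extent member of the
// second [lower bound, extent, stride] triple of the descriptor.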
/// Get the address of the type descriptor global variable that was created by
@@ -1545,6 +1568,23 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
return {boxTy, descriptor, eleSize};
}
/// Compute the base address of a substring given the base address of a scalar
/// string and the zero-based string lower bound.
mlir::Value shiftSubstringBase(mlir::ConversionPatternRewriter &rewriter,
mlir::Location loc, mlir::Value base,
mlir::Value lowerBound) const {
llvm::SmallVector<mlir::Value> gepOperands;
auto baseType =
base.getType().cast<mlir::LLVM::LLVMPointerType>().getElementType();
if (baseType.isa<mlir::LLVM::LLVMArrayType>()) {
auto idxTy = this->lowerTy().indexType();
mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
gepOperands.push_back(zero);
}
gepOperands.push_back(lowerBound);
return this->genGEP(loc, base.getType(), rewriter, base, gepOperands);
}
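// For example, when `base` has type !llvm.ptr<array<10 x i8>> this emits
// GEP base[0][lowerBound]; for a plain !llvm.ptr<i8> it emits
// GEP base[lowerBound]. Either way the result points at the first
// character of the substring.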
/// If the embox is not in a globalOp body, allocate storage for the box;
/// store the value inside and return the generated alloca. Return the input
/// value otherwise.
@@ -1561,6 +1601,33 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
}
};
/// Compute the extent of a triplet slice (lb:ub:step).
static mlir::Value
computeTripletExtent(mlir::ConversionPatternRewriter &rewriter,
mlir::Location loc, mlir::Value lb, mlir::Value ub,
mlir::Value step, mlir::Value zero, mlir::Type type) {
mlir::Value extent = rewriter.create<mlir::LLVM::SubOp>(loc, type, ub, lb);
extent = rewriter.create<mlir::LLVM::AddOp>(loc, type, extent, step);
extent = rewriter.create<mlir::LLVM::SDivOp>(loc, type, extent, step);
// If the resulting extent is negative (`ub-lb` and `step` have different
// signs), zero must be returned instead.
auto cmp = rewriter.create<mlir::LLVM::ICmpOp>(
loc, mlir::LLVM::ICmpPredicate::sgt, extent, zero);
return rewriter.create<mlir::LLVM::SelectOp>(loc, cmp, extent, zero);
}
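// For example, the triplet 2:10:2 yields (10 - 2 + 2) / 2 = 5 elements,
// while 10:2:1 yields (2 - 10 + 1) / 1 = -7, which the select above
// clamps to 0.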
/// Helper function for generating the LLVM IR that computes the size
/// in bytes for a derived type.
static mlir::Value
computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
mlir::ConversionPatternRewriter &rewriter) {
auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
llvm::SmallVector<mlir::Value> args{nullPtr, one};
auto gep = rewriter.create<mlir::LLVM::GEPOp>(loc, ptrTy, args);
return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
}
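// This is the usual null-pointer GEP trick for computing sizeof:
// `getelementptr null, 1` followed by `ptrtoint` folds to the allocation
// size of the pointee type. For the struct<(i32, i32)> in the
// _QPtest_dt_slice test below it yields 8 bytes.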
/// Create a generic box on a memory reference. This conversions lowers the
/// abstract box to the appropriate, initialized descriptor.
struct EmboxOpConversion : public EmboxCommonConversion<fir::EmboxOp> {
@@ -1599,6 +1666,172 @@ struct EmboxProcOpConversion : public FIROpConversion<fir::EmboxProcOp> {
}
};
/// Create a generic box on a memory reference.
struct XEmboxOpConversion : public EmboxCommonConversion<fir::cg::XEmboxOp> {
using EmboxCommonConversion::EmboxCommonConversion;
mlir::LogicalResult
matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
auto [boxTy, dest, eleSize] = consDescriptorPrefix(
xbox, rewriter, xbox.getOutRank(),
adaptor.getOperands().drop_front(xbox.lenParamOffset()));
// Generate the triples in the dims field of the descriptor
mlir::ValueRange operands = adaptor.getOperands();
auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64);
mlir::Value base = operands[0];
assert(!xbox.shape().empty() && "must have a shape");
unsigned shapeOffset = xbox.shapeOffset();
bool hasShift = !xbox.shift().empty();
unsigned shiftOffset = xbox.shiftOffset();
bool hasSlice = !xbox.slice().empty();
unsigned sliceOffset = xbox.sliceOffset();
mlir::Location loc = xbox.getLoc();
mlir::Value zero = genConstantIndex(loc, i64Ty, rewriter, 0);
mlir::Value one = genConstantIndex(loc, i64Ty, rewriter, 1);
mlir::Value prevDim = integerCast(loc, rewriter, i64Ty, eleSize);
mlir::Value prevPtrOff = one;
mlir::Type eleTy = boxTy.getEleTy();
const unsigned rank = xbox.getRank();
llvm::SmallVector<mlir::Value> gepArgs;
unsigned constRows = 0;
mlir::Value ptrOffset = zero;
if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()))
if (auto seqTy = memEleTy.dyn_cast<fir::SequenceType>()) {
mlir::Type seqEleTy = seqTy.getEleTy();
// Adjust the element scaling factor if the element is a dependent type.
if (fir::hasDynamicSize(seqEleTy)) {
if (fir::isa_char(seqEleTy)) {
assert(xbox.lenParams().size() == 1);
prevPtrOff = integerCast(loc, rewriter, i64Ty,
operands[xbox.lenParamOffset()]);
} else if (seqEleTy.isa<fir::RecordType>()) {
TODO(loc, "generate call to calculate size of PDT");
} else {
return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type");
}
} else {
constRows = seqTy.getConstantRows();
}
}
bool hasSubcomp = !xbox.subcomponent().empty();
mlir::Value stepExpr;
if (hasSubcomp) {
// We have a subcomponent. The step value needs to be the number of
// bytes per element (which is a derived type).
mlir::Type ty0 = base.getType();
[[maybe_unused]] auto ptrTy = ty0.dyn_cast<mlir::LLVM::LLVMPointerType>();
assert(ptrTy && "expected pointer type");
mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType());
assert(memEleTy && "expected fir pointer type");
auto seqTy = memEleTy.dyn_cast<fir::SequenceType>();
assert(seqTy && "expected sequence type");
mlir::Type seqEleTy = seqTy.getEleTy();
auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy));
stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter);
}
// Process the array subspace arguments (shape, shift, etc.), if any,
// translating everything to values in the descriptor wherever the entity
// has a dynamic array dimension.
for (unsigned di = 0, descIdx = 0; di < rank; ++di) {
mlir::Value extent = operands[shapeOffset];
mlir::Value outerExtent = extent;
bool skipNext = false;
if (hasSlice) {
mlir::Value off = operands[sliceOffset];
mlir::Value adj = one;
if (hasShift)
adj = operands[shiftOffset];
auto ao = rewriter.create<mlir::LLVM::SubOp>(loc, i64Ty, off, adj);
if (constRows > 0) {
gepArgs.push_back(ao);
--constRows;
} else {
auto dimOff =
rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, ao, prevPtrOff);
ptrOffset =
rewriter.create<mlir::LLVM::AddOp>(loc, i64Ty, dimOff, ptrOffset);
}
if (mlir::isa_and_nonnull<fir::UndefOp>(
xbox.slice()[3 * di + 1].getDefiningOp())) {
// This dimension contains a scalar expression in the array slice op.
// The dimension is loop invariant, will be dropped, and will not
// appear in the descriptor.
skipNext = true;
}
}
if (!skipNext) {
// store lower bound (normally 0)
mlir::Value lb = zero;
if (eleTy.isa<fir::PointerType>() || eleTy.isa<fir::HeapType>()) {
lb = one;
if (hasShift)
lb = operands[shiftOffset];
}
dest = insertLowerBound(rewriter, loc, dest, descIdx, lb);
// store extent
if (hasSlice)
extent = computeTripletExtent(rewriter, loc, operands[sliceOffset],
operands[sliceOffset + 1],
operands[sliceOffset + 2], zero, i64Ty);
dest = insertExtent(rewriter, loc, dest, descIdx, extent);
// store step (scaled by shaped extent)
mlir::Value step = hasSubcomp ? stepExpr : prevDim;
if (hasSlice)
step = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, step,
operands[sliceOffset + 2]);
dest = insertStride(rewriter, loc, dest, descIdx, step);
++descIdx;
}
// compute the stride and offset for the next natural dimension
prevDim =
rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevDim, outerExtent);
if (constRows == 0)
prevPtrOff = rewriter.create<mlir::LLVM::MulOp>(loc, i64Ty, prevPtrOff,
outerExtent);
// increment iterators
++shapeOffset;
if (hasShift)
++shiftOffset;
if (hasSlice)
sliceOffset += 3;
}
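// In summary, a sliced dimension lb:ub:step of an array whose element
// size is `elemSize` bytes lands in the descriptor as:
//   lower bound: 0 (1, or the shift value, when the box element type is
//                fir.ptr or fir.heap),
//   extent:      max((ub - lb + step) / step, 0),
//   stride:      prevDim * step, where prevDim starts at elemSize and is
//                scaled by each prior dimension's full extent.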
if (hasSlice || hasSubcomp || !xbox.substr().empty()) {
llvm::SmallVector<mlir::Value> args = {base, ptrOffset};
args.append(gepArgs.rbegin(), gepArgs.rend());
if (hasSubcomp) {
// For each field in the path add the offset to base via the args list.
// In the most general case, some offsets must be computed since
// they are not known until runtime.
if (fir::hasDynamicSize(fir::unwrapSequenceType(
fir::unwrapPassByRefType(xbox.memref().getType()))))
TODO(loc, "fir.embox codegen dynamic size component in derived type");
args.append(operands.begin() + xbox.subcomponentOffset(),
operands.begin() + xbox.subcomponentOffset() +
xbox.subcomponent().size());
}
base = rewriter.create<mlir::LLVM::GEPOp>(loc, base.getType(), args);
if (!xbox.substr().empty())
base = shiftSubstringBase(rewriter, loc, base,
operands[xbox.substrOffset()]);
}
dest = insertBaseAddress(rewriter, loc, dest, base);
if (isDerivedTypeWithLenParams(boxTy))
TODO(loc, "fir.embox codegen of derived with length parameters");
mlir::Value result = placeInMemoryIfNotGlobalInit(rewriter, loc, dest);
rewriter.replaceOp(xbox, result);
return success();
}
};
// Code shared between insert_value and extract_value Ops.
struct ValueOpCommon {
// Translate the arguments pertaining to any multidimensional array to
@@ -2198,8 +2431,8 @@ public:
ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
SliceOpConversion, StoreOpConversion, StringLitOpConversion,
SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
- UndefOpConversion, UnreachableOpConversion, ZeroOpConversion>(
-     typeConverter);
+ UndefOpConversion, UnreachableOpConversion, XEmboxOpConversion,
+ ZeroOpConversion>(typeConverter);
mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern);
mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
pattern);


@@ -33,6 +33,10 @@ static constexpr unsigned kF18AddendumPosInBox = 6;
static constexpr unsigned kDimsPosInBox = 7;
static constexpr unsigned kOptTypePtrPosInBox = 8;
static constexpr unsigned kOptRowTypePosInBox = 9;
// Position of the different values in [dims]
static constexpr unsigned kDimLowerBoundPos = 0;
static constexpr unsigned kDimExtentPos = 1;
static constexpr unsigned kDimStridePos = 2;
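// These positions follow the member order of CFI_dim_t (lower_bound,
// extent, sm) in flang/ISO_Fortran_binding.h.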
namespace fir {


@@ -1589,3 +1589,242 @@ func @no_reassoc(%arg0: !fir.ref<i32>) {
// CHECK: %[[LOAD:.*]] = llvm.load %[[ARG0]] : !llvm.ptr<i32>
// CHECK: llvm.store %[[LOAD]], %[[ALLOC]] : !llvm.ptr<i32>
// CHECK: llvm.return
// -----
// Test `fircg.ext_embox` conversion.
// Check complete `fircg.ext_embox`.
func @xembox0(%arg0: !fir.ref<!fir.array<?xi32>>) {
%c0 = arith.constant 0 : i64
%0 = fircg.ext_embox %arg0(%c0) origin %c0[%c0, %c0, %c0] : (!fir.ref<!fir.array<?xi32>>, i64, i64, i64, i64, i64) -> !fir.box<!fir.array<?xi32>>
return
}
// CHECK-LABEL: llvm.func @xembox0(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<i32>
// CHECK: %[[ALLOCA_SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOCA_SIZE]] x !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>
// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[ELEM_LEN:.*]] = llvm.mlir.constant(4 : i32) : i32
// CHECK: %[[TYPE:.*]] = llvm.mlir.constant(9 : i32) : i32
// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %[[BOX1:.*]] = llvm.insertvalue %[[ELEM_LEN_I64]], %[[BOX0]][1 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
// CHECK: %[[BOX2:.*]] = llvm.insertvalue %[[VERSION]], %[[BOX1]][2 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[RANK:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[RANK_I8:.*]] = llvm.trunc %[[RANK]] : i32 to i8
// CHECK: %[[BOX3:.*]] = llvm.insertvalue %[[RANK_I8]], %[[BOX2]][3 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[TYPE_I8:.*]] = llvm.trunc %[[TYPE]] : i32 to i8
// CHECK: %[[BOX4:.*]] = llvm.insertvalue %[[TYPE_I8]], %[[BOX3]][4 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[ATTR:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[ATTR_I8:.*]] = llvm.trunc %[[ATTR]] : i32 to i8
// CHECK: %[[BOX5:.*]] = llvm.insertvalue %[[ATTR_I8]], %[[BOX4]][5 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[F18ADDENDUM:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[F18ADDENDUM_I8:.*]] = llvm.trunc %[[F18ADDENDUM]] : i32 to i8
// CHECK: %[[BOX6:.*]] = llvm.insertvalue %[[F18ADDENDUM_I8]], %[[BOX5]][6 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C0]], %[[C0]] : i64
// CHECK: %[[DIM_OFFSET:.*]] = llvm.mul %[[ADJUSTED_OFFSET]], %[[ONE]] : i64
// CHECK: %[[PTR_OFFSET:.*]] = llvm.add %[[DIM_OFFSET]], %[[ZERO]] : i64
// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[ZERO]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[EXTENT0:.*]] = llvm.sub %[[C0]], %[[C0]] : i64
// CHECK: %[[EXTENT1:.*]] = llvm.add %[[EXTENT0]], %[[C0]] : i64
// CHECK: %[[EXTENT2:.*]] = llvm.sdiv %[[EXTENT1]], %[[C0]] : i64
// CHECK: %[[EXTENT_CMP:.*]] = llvm.icmp "sgt" %[[EXTENT2]], %[[ZERO]] : i64
// CHECK: %[[EXTENT:.*]] = llvm.select %[[EXTENT_CMP]], %[[EXTENT2]], %[[ZERO]] : i1, i64
// CHECK: %[[BOX8:.*]] = llvm.insertvalue %[[EXTENT]], %[[BOX7]][7 : i32, 0 : i32, 1 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[STRIDE:.*]] = llvm.mul %[[ELEM_LEN_I64]], %[[C0]] : i64
// CHECK: %[[BOX9:.*]] = llvm.insertvalue %[[STRIDE]], %[[BOX8]][7 : i32, 0 : i32, 2 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[PREV_DIM:.*]] = llvm.mul %[[ELEM_LEN_I64]], %[[C0]] : i64
// CHECK: %[[PREV_PTROFF:.*]] = llvm.mul %[[ONE]], %[[C0]] : i64
// CHECK: %[[BASE_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[PTR_OFFSET]]] : (!llvm.ptr<i32>, i64) -> !llvm.ptr<i32>
// CHECK: %[[ADDR_BITCAST:.*]] = llvm.bitcast %[[BASE_PTR]] : !llvm.ptr<i32> to !llvm.ptr<i32>
// CHECK: %[[BOX10:.*]] = llvm.insertvalue %[[ADDR_BITCAST]], %[[BOX9]][0 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: llvm.store %[[BOX10]], %[[ALLOCA]] : !llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>
// Check adjustment of element scaling factor.
func @xembox1(%arg0: !fir.ref<!fir.array<?x!fir.char<1, 10>>>) {
%c0 = arith.constant 0 : i64
%0 = fircg.ext_embox %arg0(%c0) origin %c0[%c0, %c0, %c0] : (!fir.ref<!fir.array<?x!fir.char<1, 10>>>, i64, i64, i64, i64, i64) -> !fir.box<!fir.array<?x!fir.char<1, 10>>>
return
}
// CHECK-LABEL: llvm.func @xembox1(%{{.*}}: !llvm.ptr<array<10 x i8>>) {
// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[ELEM_LEN:.*]] = llvm.mlir.constant(10 : i32) : i32
// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %{{.*}} = llvm.insertvalue %[[ELEM_LEN_I64]], %{{.*}}[1 : i32] : !llvm.struct<(ptr<array<10 x i8>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[PTR_OFFSET:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %[[PREV_PTROFF:.*]] = llvm.mul %[[PTR_OFFSET]], %[[C0]] : i64
// Realistic Fortran use case, extracted from the following snippet:
//
// ```
// subroutine sb(n,sh1,sh2)
// integer::n,sh1,sh2
// double precision::arr(sh1:n,sh2:n)
// call xb(arr(2:n,4:n))
// end subroutine
// ```
// N is the upper bound; sh1 and sh2 are the shifts (lower bounds)
func @_QPsb(%N: index, %sh1: index, %sh2: index) {
%c4 = arith.constant 4 : index
%c1 = arith.constant 1 : index
%c2 = arith.constant 2 : index
// Calculate nelems in dim1
%n1_tmp = arith.subi %N, %sh1 : index
%n1 = arith.addi %n1_tmp, %c1 : index
// Calculate nelems in dim2
%n2_tmp = arith.subi %N, %sh2 : index
%n2 = arith.addi %n2_tmp, %c1 : index
%arr = fir.alloca !fir.array<?x?xf64>, %n1, %n2 {bindc_name = "arr", uniq_name = "_QFsbEarr"}
%box = fircg.ext_embox %arr(%n1, %n2) origin %sh1, %sh2[%c2, %N, %c1, %c4, %N, %c1] : (!fir.ref<!fir.array<?x?xf64>>, index, index, index, index, index, index, index, index, index, index) -> !fir.box<!fir.array<?x?xf64>>
fir.call @_QPxb(%box) : (!fir.box<!fir.array<?x?xf64>>) -> ()
return
}
func private @_QPxb(!fir.box<!fir.array<?x?xf64>>)
// CHECK-LABEL: llvm.func @_QPsb(
// CHECK-SAME: %[[N:.*]]: i64, %[[SH1:.*]]: i64, %[[SH2:.*]]: i64) {
// CHECK: %[[ALLOCA_SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOCA_SIZE]] x !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>
// CHECK: %[[C4:.*]] = llvm.mlir.constant(4 : index) : i64
// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : index) : i64
// CHECK: %[[N1_TMP:.*]] = llvm.sub %[[N]], %[[SH1]] : i64
// CHECK: %[[N1:.*]] = llvm.add %[[N1_TMP]], %[[C1]] : i64
// CHECK: %[[N2_TMP:.*]] = llvm.sub %[[N]], %[[SH2]] : i64
// CHECK: %[[N2:.*]] = llvm.add %[[N2_TMP]], %[[C1]] : i64
// CHECK: %[[C1_0:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[C1_1:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[ARR_SIZE_TMP0:.*]] = llvm.mul %[[C1_0]], %[[C1_1]] : i64
// CHECK: %[[ARR_SIZE_TMP1:.*]] = llvm.mul %[[ARR_SIZE_TMP0]], %[[N1]] : i64
// CHECK: %[[ARR_SIZE:.*]] = llvm.mul %[[ARR_SIZE_TMP1]], %[[N2]] : i64
// CHECK: %[[ARR:.*]] = llvm.alloca %[[ARR_SIZE]] x f64 {bindc_name = "arr", in_type = !fir.array<?x?xf64>, operand_segment_sizes = dense<[0, 2]> : vector<2xi32>, uniq_name = "_QFsbEarr"} : (i64) -> !llvm.ptr<f64>
// CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[ELEM_LEN:.*]] = llvm.mlir.constant(8 : i32) : i32
// CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(26 : i32) : i32
// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %[[BOX1:.*]] = llvm.insertvalue %[[ELEM_LEN_I64]], %[[BOX0]][1 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
// CHECK: %[[BOX2:.*]] = llvm.insertvalue %[[VERSION]], %[[BOX1]][2 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[RANK:.*]] = llvm.mlir.constant(2 : i32) : i32
// CHECK: %[[RANK_I8:.*]] = llvm.trunc %[[RANK]] : i32 to i8
// CHECK: %[[BOX3:.*]] = llvm.insertvalue %[[RANK_I8]], %[[BOX2]][3 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[TYPE_I8:.*]] = llvm.trunc %[[TYPE_CODE]] : i32 to i8
// CHECK: %[[BOX4:.*]] = llvm.insertvalue %[[TYPE_I8]], %[[BOX3]][4 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[ATTR:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[ATTR_I8:.*]] = llvm.trunc %[[ATTR]] : i32 to i8
// CHECK: %[[BOX5:.*]] = llvm.insertvalue %[[ATTR_I8]], %[[BOX4]][5 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[F18ADDENDUM:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[F18ADDENDUM_I8:.*]] = llvm.trunc %[[F18ADDENDUM]] : i32 to i8
// CHECK: %[[BOX6:.*]] = llvm.insertvalue %[[F18ADDENDUM_I8]], %[[BOX5]][6 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C2]], %[[SH1]] : i64
// CHECK: %[[DIM_OFFSET:.*]] = llvm.mul %[[ADJUSTED_OFFSET]], %[[ONE]] : i64
// CHECK: %[[PTR_OFFSET:.*]] = llvm.add %[[DIM_OFFSET]], %[[ZERO]] : i64
// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[ZERO]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[EXTENT0:.*]] = llvm.sub %[[N]], %[[C2]] : i64
// CHECK: %[[EXTENT1:.*]] = llvm.add %[[EXTENT0]], %[[C1]] : i64
// CHECK: %[[EXTENT2:.*]] = llvm.sdiv %[[EXTENT1]], %[[C1]] : i64
// CHECK: %[[EXTENT_CMP:.*]] = llvm.icmp "sgt" %[[EXTENT2]], %[[ZERO]] : i64
// CHECK: %[[EXTENT:.*]] = llvm.select %[[EXTENT_CMP]], %[[EXTENT2]], %[[ZERO]] : i1, i64
// CHECK: %[[BOX8:.*]] = llvm.insertvalue %[[EXTENT]], %[[BOX7]][7 : i32, 0 : i32, 1 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[STRIDE:.*]] = llvm.mul %[[ELEM_LEN_I64]], %[[C1]] : i64
// CHECK: %[[BOX9:.*]] = llvm.insertvalue %[[STRIDE]], %[[BOX8]][7 : i32, 0 : i32, 2 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[PREV_DIM:.*]] = llvm.mul %[[ELEM_LEN_I64]], %[[N1]] : i64
// CHECK: %[[PREV_PTROFF:.*]] = llvm.mul %[[ONE]], %[[N1]] : i64
// CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C4]], %[[SH2]] : i64
// CHECK: %[[DIM_OFFSET:.*]] = llvm.mul %[[ADJUSTED_OFFSET]], %[[PREV_PTROFF]] : i64
// CHECK: %[[PTR_OFFSET0:.*]] = llvm.add %[[DIM_OFFSET]], %[[PTR_OFFSET]] : i64
// CHECK: %[[BOX10:.*]] = llvm.insertvalue %[[ZERO]], %[[BOX9]][7 : i32, 1 : i32, 0 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[EXT_SUB:.*]] = llvm.sub %[[N]], %[[C4]] : i64
// CHECK: %[[EXT_ADD:.*]] = llvm.add %[[EXT_SUB]], %[[C1]] : i64
// CHECK: %[[EXT_SDIV:.*]] = llvm.sdiv %[[EXT_ADD]], %[[C1]] : i64
// CHECK: %[[EXT_ICMP:.*]] = llvm.icmp "sgt" %[[EXT_SDIV]], %[[ZERO]] : i64
// CHECK: %[[EXT_SELECT:.*]] = llvm.select %[[EXT_ICMP]], %[[EXT_SDIV]], %[[ZERO]] : i1, i64
// CHECK: %[[BOX11:.*]] = llvm.insertvalue %[[EXT_SELECT]], %[[BOX10]][7 : i32, 1 : i32, 1 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[STRIDE_MUL:.*]] = llvm.mul %[[PREV_DIM]], %[[C1]] : i64
// CHECK: %[[BOX12:.*]] = llvm.insertvalue %[[STRIDE_MUL]], %[[BOX11]][7 : i32, 1 : i32, 2 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[BASE_PTR:.*]] = llvm.getelementptr %[[ARR]][%[[PTR_OFFSET0]]] : (!llvm.ptr<f64>, i64) -> !llvm.ptr<f64>
// CHECK: %[[ADDR_BITCAST:.*]] = llvm.bitcast %[[BASE_PTR]] : !llvm.ptr<f64> to !llvm.ptr<f64>
// CHECK: %[[BOX13:.*]] = llvm.insertvalue %[[ADDR_BITCAST]], %[[BOX12]][0 : i32] : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: llvm.store %[[BOX13]], %[[ALLOCA]] : !llvm.ptr<struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>
// Conversion with a subcomponent.
func @_QPtest_dt_slice() {
%c20 = arith.constant 20 : index
%c1_i64 = arith.constant 1 : i64
%c10_i64 = arith.constant 10 : i64
%c2_i64 = arith.constant 2 : i64
%0 = fir.alloca i32 {bindc_name = "v", uniq_name = "_QFtest_dt_sliceEv"}
%1 = fir.alloca !fir.array<20x!fir.type<_QFtest_dt_sliceTt{i:i32,j:i32}>> {bindc_name = "x", uniq_name = "_QFtest_dt_sliceEx"}
%2 = fir.field_index i, !fir.type<_QFtest_dt_sliceTt{i:i32,j:i32}>
%5 = fircg.ext_embox %1(%c20)[%c1_i64, %c10_i64, %c2_i64] path %2 : (!fir.ref<!fir.array<20x!fir.type<_QFtest_dt_sliceTt{i:i32,j:i32}>>>, index, i64, i64, i64, !fir.field) -> !fir.box<!fir.array<?xi32>>
fir.call @_QPtest_dt_callee(%5) : (!fir.box<!fir.array<?xi32>>) -> ()
return
}
func private @_QPtest_dt_callee(!fir.box<!fir.array<?xi32>>)
// CHECK-LABEL: llvm.func @_QPtest_dt_slice
// CHECK: %[[ALLOCA_SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[ALLOCA_SIZE]] x !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>
// CHECK: %[[C20:.*]] = llvm.mlir.constant(20 : index) : i64
// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[C10:.*]] = llvm.mlir.constant(10 : i64) : i64
// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i64) : i64
// CHECK: %[[ALLOCA_SIZE_V:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[V:.*]] = llvm.alloca %[[ALLOCA_SIZE_V]] x i32 {bindc_name = "v", in_type = i32, operand_segment_sizes = dense<0> : vector<2xi32>, uniq_name = "_QFtest_dt_sliceEv"} : (i64) -> !llvm.ptr<i32>
// CHECK: %[[ALLOCA_SIZE_X:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[X:.*]] = llvm.alloca %[[ALLOCA_SIZE_X]] x !llvm.array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>> {bindc_name = "x", in_type = !fir.array<20x!fir.type<_QFtest_dt_sliceTt{i:i32,j:i32}>>, operand_segment_sizes = dense<0> : vector<2xi32>, uniq_name = "_QFtest_dt_sliceEx"} : (i64) -> !llvm.ptr<array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>>>
// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[ELEM_LEN:.*]] = llvm.mlir.constant(4 : i32) : i32
// CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32
// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %[[BOX1:.*]] = llvm.insertvalue %[[ELEM_LEN_I64]], %[[BOX0]][1 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
// CHECK: %[[BOX2:.*]] = llvm.insertvalue %[[VERSION]], %[[BOX1]][2 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[RANK:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[RANK_I8:.*]] = llvm.trunc %[[RANK]] : i32 to i8
// CHECK: %[[BOX3:.*]] = llvm.insertvalue %[[RANK_I8]], %[[BOX2]][3 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[TYPE_CODE_I8:.*]] = llvm.trunc %[[TYPE_CODE]] : i32 to i8
// CHECK: %[[BOX4:.*]] = llvm.insertvalue %[[TYPE_CODE_I8]], %[[BOX3]][4 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[ATTR:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[ATTR_I8:.*]] = llvm.trunc %[[ATTR]] : i32 to i8
// CHECK: %[[BOX5:.*]] = llvm.insertvalue %[[ATTR_I8]], %[[BOX4]][5 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[F18ADDENDUM:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[F18ADDENDUM_I8:.*]] = llvm.trunc %[[F18ADDENDUM]] : i32 to i8
// CHECK: %[[BOX6:.*]] = llvm.insertvalue %[[F18ADDENDUM_I8]], %[[BOX5]][6 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64
// CHECK: %[[ELE_TYPE:.*]] = llvm.mlir.null : !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>
// CHECK: %[[C1_0:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][%[[C1_0]]] : (!llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>, i64) -> !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>
// CHECK: %[[PTRTOINT_DTYPE_SIZE:.*]] = llvm.ptrtoint %[[GEP_DTYPE_SIZE]] : !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>> to i64
// CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C1]], %[[ONE]] : i64
// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[ZERO]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[EXT_SUB:.*]] = llvm.sub %[[C10]], %[[C1]] : i64
// CHECK: %[[EXT_ADD:.*]] = llvm.add %[[EXT_SUB]], %[[C2]] : i64
// CHECK: %[[EXT_SDIV:.*]] = llvm.sdiv %[[EXT_ADD]], %[[C2]] : i64
// CHECK: %[[EXT_ICMP:.*]] = llvm.icmp "sgt" %[[EXT_SDIV]], %[[ZERO]] : i64
// CHECK: %[[EXT_SELECT:.*]] = llvm.select %[[EXT_ICMP]], %[[EXT_SDIV]], %[[ZERO]] : i1, i64
// CHECK: %[[BOX8:.*]] = llvm.insertvalue %[[EXT_SELECT]], %[[BOX7]][7 : i32, 0 : i32, 1 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[STRIDE_MUL:.*]] = llvm.mul %[[PTRTOINT_DTYPE_SIZE]], %[[C2]] : i64
// CHECK: %[[BOX9:.*]] = llvm.insertvalue %[[STRIDE_MUL]], %[[BOX8]][7 : i32, 0 : i32, 2 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[BASE_PTR:.*]] = llvm.getelementptr %[[X]][%[[ZERO]], %[[ADJUSTED_OFFSET]], %[[C0]]] : (!llvm.ptr<array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>>>, i64, i64, i32) -> !llvm.ptr<array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>>>
// CHECK: %[[ADDR_BITCAST:.*]] = llvm.bitcast %[[BASE_PTR]] : !llvm.ptr<array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>>> to !llvm.ptr<i32>
// CHECK: %[[BOX10:.*]] = llvm.insertvalue %[[ADDR_BITCAST]], %[[BOX9]][0 : i32] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: llvm.store %[[BOX10]], %[[ALLOCA]] : !llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>
// CHECK: llvm.call @_QPtest_dt_callee(%[[ALLOCA]]) : (!llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) -> ()