static constexpr unsigned kAttrPointer = CFI_attribute_pointer;
static constexpr unsigned kAttrAllocatable = CFI_attribute_allocatable;
+/// Build the LLVM i8* ("void*") type used for untyped, byte-addressed
+/// pointers such as descriptor base addresses.
+static mlir::Type getVoidPtrType(mlir::MLIRContext *context) {
+ return mlir::LLVM::LLVMPointerType::get(mlir::IntegerType::get(context, 8));
+}
+
static mlir::LLVM::ConstantOp
genConstantIndex(mlir::Location loc, mlir::Type ity,
mlir::ConversionPatternRewriter &rewriter,
}
};
+/// Create a new box given a box reference. Converts `fircg.ext_rebox` to the
+/// LLVM dialect: a fresh descriptor is built from the input descriptor,
+/// applying any shape/shift (reshapeBox) or slice/subcomponent/substring
+/// (sliceBox) information carried by the op.
+struct XReboxOpConversion : public EmboxCommonConversion<fir::cg::XReboxOp> {
+ using EmboxCommonConversion::EmboxCommonConversion;
+
+ mlir::LogicalResult
+ matchAndRewrite(fir::cg::XReboxOp rebox, OpAdaptor adaptor,
+ mlir::ConversionPatternRewriter &rewriter) const override {
+ mlir::Location loc = rebox.getLoc();
+ mlir::Type idxTy = lowerTy().indexType();
+ mlir::Value loweredBox = adaptor.getOperands()[0];
+ mlir::ValueRange operands = adaptor.getOperands();
+
+ // Create new descriptor and fill its non-shape related data.
+ llvm::SmallVector<mlir::Value, 2> lenParams;
+ mlir::Type inputEleTy = getInputEleTy(rebox);
+ if (auto charTy = inputEleTy.dyn_cast<fir::CharacterType>()) {
+ // The element size stored in the input box is in bytes; for kind != 1
+ // characters, divide by the kind width to recover the character length
+ // parameter.
+ mlir::Value len =
+ loadElementSizeFromBox(loc, idxTy, loweredBox, rewriter);
+ if (charTy.getFKind() != 1) {
+ mlir::Value width =
+ genConstantIndex(loc, idxTy, rewriter, charTy.getFKind());
+ len = rewriter.create<mlir::LLVM::SDivOp>(loc, idxTy, len, width);
+ }
+ lenParams.emplace_back(len);
+ } else if (auto recTy = inputEleTy.dyn_cast<fir::RecordType>()) {
+ if (recTy.getNumLenParams() != 0)
+ TODO(loc, "reboxing descriptor of derived type with length parameters");
+ }
+ auto [boxTy, dest, eleSize] =
+ consDescriptorPrefix(rebox, rewriter, rebox.getOutRank(), lenParams);
+
+ // Read input extents, strides, and base address.
+ llvm::SmallVector<mlir::Value> inputExtents;
+ llvm::SmallVector<mlir::Value> inputStrides;
+ const unsigned inputRank = rebox.getRank();
+ for (unsigned i = 0; i < inputRank; ++i) {
+ mlir::Value dim = genConstantIndex(loc, idxTy, rewriter, i);
+ SmallVector<mlir::Value, 3> dimInfo =
+ getDimsFromBox(loc, {idxTy, idxTy, idxTy}, loweredBox, dim, rewriter);
+ inputExtents.emplace_back(dimInfo[1]);
+ inputStrides.emplace_back(dimInfo[2]);
+ }
+
+ mlir::Type baseTy = getBaseAddrTypeFromBox(loweredBox.getType());
+ mlir::Value baseAddr =
+ loadBaseAddrFromBox(loc, baseTy, loweredBox, rewriter);
+
+ // A slice or subcomponent requires address arithmetic on the base;
+ // otherwise only the shape/lower bounds of the descriptor change.
+ if (!rebox.slice().empty() || !rebox.subcomponent().empty())
+ return sliceBox(rebox, dest, baseAddr, inputExtents, inputStrides,
+ operands, rewriter);
+ return reshapeBox(rebox, dest, baseAddr, inputExtents, inputStrides,
+ operands, rewriter);
+ }
+
+private:
+ /// Write resulting shape and base address in descriptor, and replace rebox
+ /// op.
+ mlir::LogicalResult
+ finalizeRebox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
+ mlir::ValueRange lbounds, mlir::ValueRange extents,
+ mlir::ValueRange strides,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Location loc = rebox.getLoc();
+ // Lower bounds default to 1 when the op does not provide any.
+ mlir::Value one = genConstantIndex(loc, lowerTy().indexType(), rewriter, 1);
+ for (auto iter : llvm::enumerate(llvm::zip(extents, strides))) {
+ unsigned dim = iter.index();
+ mlir::Value lb = lbounds.empty() ? one : lbounds[dim];
+ dest = insertLowerBound(rewriter, loc, dest, dim, lb);
+ dest = insertExtent(rewriter, loc, dest, dim, std::get<0>(iter.value()));
+ dest = insertStride(rewriter, loc, dest, dim, std::get<1>(iter.value()));
+ }
+ dest = insertBaseAddress(rewriter, loc, dest, base);
+ mlir::Value result =
+ placeInMemoryIfNotGlobalInit(rewriter, rebox.getLoc(), dest);
+ rewriter.replaceOp(rebox, result);
+ return success();
+ }
+
+ // Apply slice given the base address, extents and strides of the input box.
+ mlir::LogicalResult
+ sliceBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
+ mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
+ mlir::ValueRange operands,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::Location loc = rebox.getLoc();
+ mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
+ mlir::Type idxTy = lowerTy().indexType();
+ mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
+ // Apply subcomponent and substring shift on base address.
+ if (!rebox.subcomponent().empty() || !rebox.substr().empty()) {
+ // Cast to inputEleTy* so that a GEP can be used.
+ mlir::Type inputEleTy = getInputEleTy(rebox);
+ auto llvmElePtrTy =
+ mlir::LLVM::LLVMPointerType::get(convertType(inputEleTy));
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, llvmElePtrTy, base);
+
+ if (!rebox.subcomponent().empty()) {
+ // GEP indices: a leading 0 to step through the pointer, then one
+ // index per entry of the component path.
+ llvm::SmallVector<mlir::Value> gepOperands = {zero};
+ for (unsigned i = 0; i < rebox.subcomponent().size(); ++i)
+ gepOperands.push_back(operands[rebox.subcomponentOffset() + i]);
+ base = genGEP(loc, llvmElePtrTy, rewriter, base, gepOperands);
+ }
+ if (!rebox.substr().empty())
+ base = shiftSubstringBase(rewriter, loc, base,
+ operands[rebox.substrOffset()]);
+ }
+
+ if (rebox.slice().empty())
+ // The array section is of the form array[%component][substring], keep
+ // the input array extents and strides.
+ return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
+ inputExtents, inputStrides, rewriter);
+
+ // Strides from the fir.box are in bytes.
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
+
+ // The slice is of the form array(i:j:k)[%component]. Compute new extents
+ // and strides.
+ llvm::SmallVector<mlir::Value> slicedExtents;
+ llvm::SmallVector<mlir::Value> slicedStrides;
+ mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
+ const bool sliceHasOrigins = !rebox.shift().empty();
+ unsigned sliceOps = rebox.sliceOffset();
+ unsigned shiftOps = rebox.shiftOffset();
+ auto strideOps = inputStrides.begin();
+ const unsigned inputRank = inputStrides.size();
+ // Each input dimension consumes one shift operand and three slice
+ // operands (lower bound, upper bound, step).
+ for (unsigned i = 0; i < inputRank;
+ ++i, ++strideOps, ++shiftOps, sliceOps += 3) {
+ mlir::Value sliceLb =
+ integerCast(loc, rewriter, idxTy, operands[sliceOps]);
+ mlir::Value inputStride = *strideOps; // already idxTy
+ // Apply origin shift: base += (lb-shift)*input_stride
+ mlir::Value sliceOrigin =
+ sliceHasOrigins
+ ? integerCast(loc, rewriter, idxTy, operands[shiftOps])
+ : one;
+ mlir::Value diff =
+ rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, sliceLb, sliceOrigin);
+ mlir::Value offset =
+ rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, diff, inputStride);
+ base = genGEP(loc, voidPtrTy, rewriter, base, offset);
+ // Apply upper bound and step if this is a triplet. Otherwise, the
+ // dimension is dropped and no extents/strides are computed.
+ mlir::Value upper = operands[sliceOps + 1];
+ const bool isTripletSlice =
+ !mlir::isa_and_nonnull<mlir::LLVM::UndefOp>(upper.getDefiningOp());
+ if (isTripletSlice) {
+ mlir::Value step =
+ integerCast(loc, rewriter, idxTy, operands[sliceOps + 2]);
+ // extent = max(0, (ub - lb + step) / step)
+ mlir::Value sliceUb = integerCast(loc, rewriter, idxTy, upper);
+ mlir::Value extent = computeTripletExtent(rewriter, loc, sliceLb,
+ sliceUb, step, zero, idxTy);
+ slicedExtents.emplace_back(extent);
+ // stride = step*input_stride
+ mlir::Value stride =
+ rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, step, inputStride);
+ slicedStrides.emplace_back(stride);
+ }
+ }
+ return finalizeRebox(rebox, dest, base, /*lbounds*/ llvm::None,
+ slicedExtents, slicedStrides, rewriter);
+ }
+
+ /// Apply a new shape to the data described by a box given the base address,
+ /// extents and strides of the box.
+ mlir::LogicalResult
+ reshapeBox(fir::cg::XReboxOp rebox, mlir::Value dest, mlir::Value base,
+ mlir::ValueRange inputExtents, mlir::ValueRange inputStrides,
+ mlir::ValueRange operands,
+ mlir::ConversionPatternRewriter &rewriter) const {
+ mlir::ValueRange reboxShifts{operands.begin() + rebox.shiftOffset(),
+ operands.begin() + rebox.shiftOffset() +
+ rebox.shift().size()};
+ if (rebox.shape().empty()) {
+ // Only setting new lower bounds.
+ return finalizeRebox(rebox, dest, base, reboxShifts, inputExtents,
+ inputStrides, rewriter);
+ }
+
+ mlir::Location loc = rebox.getLoc();
+ // Strides from the fir.box are in bytes.
+ mlir::Type voidPtrTy = ::getVoidPtrType(rebox.getContext());
+ base = rewriter.create<mlir::LLVM::BitcastOp>(loc, voidPtrTy, base);
+
+ llvm::SmallVector<mlir::Value> newStrides;
+ llvm::SmallVector<mlir::Value> newExtents;
+ mlir::Type idxTy = lowerTy().indexType();
+ // First stride from input box is kept. The rest is assumed contiguous
+ // (it is not possible to reshape otherwise). If the input is scalar,
+ // which may be OK if all new extents are ones, the stride does not
+ // matter, use one.
+ mlir::Value stride = inputStrides.empty()
+ ? genConstantIndex(loc, idxTy, rewriter, 1)
+ : inputStrides[0];
+ for (unsigned i = 0; i < rebox.shape().size(); ++i) {
+ mlir::Value rawExtent = operands[rebox.shapeOffset() + i];
+ mlir::Value extent = integerCast(loc, rewriter, idxTy, rawExtent);
+ newExtents.emplace_back(extent);
+ newStrides.emplace_back(stride);
+ // nextStride = extent * stride;
+ stride = rewriter.create<mlir::LLVM::MulOp>(loc, idxTy, extent, stride);
+ }
+ return finalizeRebox(rebox, dest, base, reboxShifts, newExtents, newStrides,
+ rewriter);
+ }
+
+ /// Return scalar element type of the input box.
+ static mlir::Type getInputEleTy(fir::cg::XReboxOp rebox) {
+ auto ty = fir::dyn_cast_ptrOrBoxEleTy(rebox.box().getType());
+ if (auto seqTy = ty.dyn_cast<fir::SequenceType>())
+ return seqTy.getEleTy();
+ return ty;
+ }
+};
+
// Code shared between insert_value and extract_value Ops.
struct ValueOpCommon {
// Translate the arguments pertaining to any multidimensional array to
SliceOpConversion, StoreOpConversion, StringLitOpConversion,
SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
UndefOpConversion, UnreachableOpConversion, XArrayCoorOpConversion,
- XEmboxOpConversion, ZeroOpConversion>(typeConverter);
+ XEmboxOpConversion, XReboxOpConversion, ZeroOpConversion>(
+ typeConverter);
mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern);
mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
pattern);
// CHECK: %[[OFFSET:.*]] = llvm.add %[[STRIDE]], %[[C0_1]] : i64
// CHECK: %[[BITCAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr<array<100 x i32>> to !llvm.ptr<i32>
// CHECK: %{{.*}} = llvm.getelementptr %[[BITCAST]][%[[OFFSET]]] : (!llvm.ptr<i32>, i64) -> !llvm.ptr<i32>
+
+// -----
+
+// Check `fircg.ext_rebox` conversion to LLVM IR dialect
+
+// Test applying slice on fir.box. Note that the slice is 1D whereas the array is 2D.
+// subroutine foo(x)
+// real :: x(3:, 4:)
+// call bar(x(5, 6:80:3))
+// end subroutine
+
+// Callee declaration and rebox under test: the 2D input box is sliced down
+// to a 1D section, with origin shifts (3, 4) applied per the snippet above.
+func private @bar1(!fir.box<!fir.array<?xf32>>)
+func @test_rebox_1(%arg0: !fir.box<!fir.array<?x?xf32>>) {
+ %c2 = arith.constant 2 : index
+ %c3 = arith.constant 3 : index
+ %c4 = arith.constant 4 : index
+ %c5 = arith.constant 5 : index
+ %c6 = arith.constant 6 : index
+ %c80 = arith.constant 80 : index
+ // fir.undefined marks the dropped (non-triplet) dimension of the slice.
+ %0 = fir.undefined index
+ %3 = fircg.ext_rebox %arg0 origin %c3, %c4[%c5, %0, %0, %c6, %c80, %c3] : (!fir.box<!fir.array<?x?xf32>>, index, index, index, index, index, index, index, index) -> !fir.box<!fir.array<?xf32>>
+ fir.call @bar1(%3) : (!fir.box<!fir.array<?xf32>>) -> ()
+ return
+}
+//CHECK-LABEL: llvm.func @bar1
+//CHECK-LABEL: llvm.func @test_rebox_1
+//CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>
+//CHECK: %[[ONE_1:.*]] = llvm.mlir.constant(1 : i32) : i32
+//CHECK: %[[RESULT_BOX_REF:.*]] = llvm.alloca %[[ONE_1]] x !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>
+//CHECK: %[[THREE:.*]] = llvm.mlir.constant(3 : index) : i64
+//CHECK: %[[FOUR:.*]] = llvm.mlir.constant(4 : index) : i64
+//CHECK: %[[FIVE:.*]] = llvm.mlir.constant(5 : index) : i64
+//CHECK: %[[SIX:.*]] = llvm.mlir.constant(6 : index) : i64
+//CHECK: %[[EIGHTY:.*]] = llvm.mlir.constant(80 : index) : i64
+//CHECK: %[[RBOX:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[ELEM_SIZE:.*]] = llvm.mlir.constant(4 : i32) : i32
+//CHECK: %[[FLOAT_TYPE:.*]] = llvm.mlir.constant(25 : i32) : i32
+//CHECK: %[[ELEM_SIZE_I64:.*]] = llvm.sext %[[ELEM_SIZE]] : i32 to i64
+//CHECK: %[[RBOX_TMP1:.*]] = llvm.insertvalue %[[ELEM_SIZE_I64]], %[[RBOX]][1 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[CFI_VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
+//CHECK: %[[RBOX_TMP2:.*]] = llvm.insertvalue %[[CFI_VERSION]], %[[RBOX_TMP1]][2 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RANK:.*]] = llvm.mlir.constant(1 : i32) : i32
+//CHECK: %[[RANK_I8:.*]] = llvm.trunc %[[RANK]] : i32 to i8
+//CHECK: %[[RBOX_TMP3:.*]] = llvm.insertvalue %[[RANK_I8]], %[[RBOX_TMP2]][3 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[FLOAT_TYPE_I8:.*]] = llvm.trunc %[[FLOAT_TYPE]] : i32 to i8
+//CHECK: %[[RBOX_TMP4:.*]] = llvm.insertvalue %[[FLOAT_TYPE_I8]], %[[RBOX_TMP3]][4 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[OTHER_ATTR:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[OTHER_ATTR_I8:.*]] = llvm.trunc %[[OTHER_ATTR]] : i32 to i8
+//CHECK: %[[RBOX_TMP5:.*]] = llvm.insertvalue %[[OTHER_ATTR_I8]], %[[RBOX_TMP4]][5 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[ADDENDUM:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[ADDENDUM_I8:.*]] = llvm.trunc %[[ADDENDUM]] : i32 to i8
+//CHECK: %[[RBOX_TMP6:.*]] = llvm.insertvalue %[[ADDENDUM_I8]], %[[RBOX_TMP5]][6 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[DIM1:.*]] = llvm.mlir.constant(0 : i64) : i64
+//CHECK: %[[GEP_ZERO_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[DIM_IDX_1:.*]] = llvm.mlir.constant(7 : i32) : i32
+//CHECK: %[[LB1_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32
+//CHECK: %[[DIM1_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][%[[GEP_ZERO_1]], %[[DIM_IDX_1]], %[[DIM1]], %[[LB1_IDX]]] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32, i32, i64, i32) -> !llvm.ptr<i64>
+//CHECK: %[[DIM1_STRIDE:.*]] = llvm.load %[[DIM1_STRIDE_REF]] : !llvm.ptr<i64>
+//CHECK: %[[DIM2:.*]] = llvm.mlir.constant(1 : i64) : i64
+//CHECK: %[[GEP_ZERO_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[DIM_IDX_2:.*]] = llvm.mlir.constant(7 : i32) : i32
+//CHECK: %[[STRIDE2_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32
+//CHECK: %[[DIM2_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][%[[GEP_ZERO_2]], %[[DIM_IDX_2]], %[[DIM2]], %[[STRIDE2_IDX]]] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32, i32, i64, i32) -> !llvm.ptr<i64>
+//CHECK: %[[DIM2_STRIDE:.*]] = llvm.load %[[DIM2_STRIDE_REF]] : !llvm.ptr<i64>
+//CHECK: %[[ZERO_1:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[ZERO_2:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[SOURCE_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[ZERO_1]], %[[ZERO_2]]] : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32, i32) -> !llvm.ptr<ptr<f32>>
+//CHECK: %[[SOURCE_ARRAY:.*]] = llvm.load %[[SOURCE_ARRAY_PTR]] : !llvm.ptr<ptr<f32>>
+//CHECK: %[[ZERO_ELEMS:.*]] = llvm.mlir.constant(0 : i64) : i64
+//CHECK: %[[SOURCE_ARRAY_I8PTR:.*]] = llvm.bitcast %[[SOURCE_ARRAY]] : !llvm.ptr<f32> to !llvm.ptr<i8>
+//CHECK: %[[DIM1_LB_DIFF:.*]] = llvm.sub %[[FIVE]], %[[THREE]] : i64
+//CHECK: %[[DIM1_LB_OFFSET:.*]] = llvm.mul %[[DIM1_LB_DIFF]], %[[DIM1_STRIDE]] : i64
+//CHECK: %[[RESULT_PTR_DIM1:.*]] = llvm.getelementptr %[[SOURCE_ARRAY_I8PTR]][%[[DIM1_LB_OFFSET]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
+//CHECK: %[[DIM2_LB_DIFF:.*]] = llvm.sub %[[SIX]], %[[FOUR]] : i64
+//CHECK: %[[DIM2_LB_OFFSET:.*]] = llvm.mul %[[DIM2_LB_DIFF]], %[[DIM2_STRIDE]] : i64
+//CHECK: %[[RESULT_PTR_I8:.*]] = llvm.getelementptr %[[RESULT_PTR_DIM1]][%[[DIM2_LB_OFFSET]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
+//CHECK: %[[RESULT_UB_LB_DIFF:.*]] = llvm.sub %[[EIGHTY]], %[[SIX]] : i64
+//CHECK: %[[RESULT_UB_LB_DIFF_PLUS_STRIDE:.*]] = llvm.add %[[RESULT_UB_LB_DIFF]], %[[THREE]] : i64
+//CHECK: %[[RESULT_NELEMS_TMP:.*]] = llvm.sdiv %[[RESULT_UB_LB_DIFF_PLUS_STRIDE]], %[[THREE]] : i64
+//CHECK: %[[RESULT_IF_NON_ZERO:.*]] = llvm.icmp "sgt" %[[RESULT_NELEMS_TMP]], %[[ZERO_ELEMS]] : i64
+//CHECK: %[[RESULT_NELEMS:.*]] = llvm.select %[[RESULT_IF_NON_ZERO]], %[[RESULT_NELEMS_TMP]], %[[ZERO_ELEMS]] : i1, i64
+//CHECK: %[[RESULT_STRIDE:.*]] = llvm.mul %[[THREE]], %[[DIM2_STRIDE]] : i64
+//CHECK: %[[RESULT_LB:.*]] = llvm.mlir.constant(1 : i64) : i64
+//CHECK: %[[RBOX_TMP7_1:.*]] = llvm.insertvalue %[[RESULT_LB]], %[[RBOX_TMP6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RBOX_TMP7_2:.*]] = llvm.insertvalue %[[RESULT_NELEMS]], %[[RBOX_TMP7_1]][7 : i32, 0 : i32, 1 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RBOX_TMP7_3:.*]] = llvm.insertvalue %[[RESULT_STRIDE]], %[[RBOX_TMP7_2]][7 : i32, 0 : i32, 2 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RESULT_PTR_F32:.*]] = llvm.bitcast %[[RESULT_PTR_I8]] : !llvm.ptr<i8> to !llvm.ptr<f32>
+//CHECK: %[[RESULT_BOX:.*]] = llvm.insertvalue %[[RESULT_PTR_F32]], %[[RBOX_TMP7_3]][0 : i32] : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: llvm.store %[[RESULT_BOX]], %[[RESULT_BOX_REF]] : !llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>
+//CHECK: llvm.call @bar1(%[[RESULT_BOX_REF]]) : (!llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>) -> ()
+
+
+// Test a rebox of an array section like x(3:60:9)%c(2:8) with both a triplet, a component and a substring where x is a fir.box.
+// Callee declaration and rebox under test: triplet slice (3:60:9) combined
+// with a component path (field c) and a substring (operands 1 and 7).
+func private @bar(!fir.box<!fir.array<?x!fir.char<1,?>>>)
+func @foo(%arg0: !fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}>>>) {
+ %c3_i64 = arith.constant 3 : i64
+ %c60_i64 = arith.constant 60 : i64
+ %c9_i64 = arith.constant 9 : i64
+ %c1_i64 = arith.constant 1 : i64
+ %c7_i64 = arith.constant 7 : i64
+ // Field index for component `c` of derived type `t`.
+ %0 = fir.field_index c, !fir.type<t{i:i32,c:!fir.char<1,10>}>
+ %1 = fircg.ext_rebox %arg0[%c3_i64, %c60_i64, %c9_i64] path %0 substr %c1_i64, %c7_i64 : (!fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}>>>, i64, i64, i64, !fir.field, i64, i64) -> !fir.box<!fir.array<?x!fir.char<1,?>>>
+ fir.call @bar(%1) : (!fir.box<!fir.array<?x!fir.char<1,?>>>) -> ()
+ return
+}
+
+//CHECK: llvm.func @bar(!llvm.ptr<struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>) attributes {sym_visibility = "private"}
+//CHECK-LABEL: llvm.func @foo
+//CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<struct<"t", (i32, array<10 x i8>)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>
+//CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
+//CHECK: %[[RESULT_BOX_REF:.*]] = llvm.alloca %[[ONE]] x !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>
+//CHECK: %[[RESULT_LB:.*]] = llvm.mlir.constant(3 : i64) : i64
+//CHECK: %[[RESULT_UB:.*]] = llvm.mlir.constant(60 : i64) : i64
+//CHECK: %[[RESULT_STRIDE:.*]] = llvm.mlir.constant(9 : i64) : i64
+//CHECK: %[[COMPONENT_OFFSET_1:.*]] = llvm.mlir.constant(1 : i64) : i64
+//CHECK: %[[ELEM_SIZE:.*]] = llvm.mlir.constant(7 : i64) : i64
+//CHECK: %[[COMPONENT_OFFSET_2:.*]] = llvm.mlir.constant(1 : i32) : i32
+//CHECK: %[[TYPE_CHAR:.*]] = llvm.mlir.constant(32 : i32) : i32
+//CHECK: %[[RBOX_TMP1:.*]] = llvm.insertvalue %[[ELEM_SIZE]], %{{.*}}[1 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RBOX_TMP2:.*]] = llvm.insertvalue %{{.*}}, %[[RBOX_TMP1]][2 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RANK:.*]] = llvm.mlir.constant(1 : i32) : i32
+//CHECK: %[[RANK_I8:.*]] = llvm.trunc %[[RANK]] : i32 to i8
+//CHECK: %[[RBOX_TMP3:.*]] = llvm.insertvalue %[[RANK_I8]], %[[RBOX_TMP2]][3 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[TYPE_CHAR_I8:.*]] = llvm.trunc %[[TYPE_CHAR]] : i32 to i8
+//CHECK: %[[RBOX_TMP4:.*]] = llvm.insertvalue %[[TYPE_CHAR_I8]], %[[RBOX_TMP3]][4 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RBOX_TMP5:.*]] = llvm.insertvalue %{{.*}}, %[[RBOX_TMP4]][5 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[ADDENDUM:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[ADDENDUM_I8:.*]] = llvm.trunc %[[ADDENDUM]] : i32 to i8
+//CHECK: %[[RBOX_TMP6:.*]] = llvm.insertvalue %[[ADDENDUM_I8]], %[[RBOX_TMP5]][6 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[DIM1:.*]] = llvm.mlir.constant(0 : i64) : i64
+//CHECK: %[[ZERO_3:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[DIM_IDX:.*]] = llvm.mlir.constant(7 : i32) : i32
+//CHECK: %[[STRIDE_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32
+//CHECK: %[[SRC_STRIDE_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[ZERO_3]], %[[DIM_IDX]], %[[DIM1]], %[[STRIDE_IDX]]] : (!llvm.ptr<struct<(ptr<struct<"t", (i32, array<10 x i8>)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>, i32, i32, i64, i32) -> !llvm.ptr<i64>
+//CHECK: %[[SRC_STRIDE:.*]] = llvm.load %[[SRC_STRIDE_PTR]] : !llvm.ptr<i64>
+//CHECK: %[[ZERO_4:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[ZERO_5:.*]] = llvm.mlir.constant(0 : i32) : i32
+//CHECK: %[[SRC_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[ZERO_4]], %[[ZERO_5]]] : (!llvm.ptr<struct<(ptr<struct<"t", (i32, array<10 x i8>)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>>, i32, i32) -> !llvm.ptr<ptr<struct<"t", (i32, array<10 x i8>)>>>
+//CHECK: %[[SRC_ARRAY:.*]] = llvm.load %[[SRC_ARRAY_PTR]] : !llvm.ptr<ptr<struct<"t", (i32, array<10 x i8>)>>>
+//CHECK: %[[ZERO_6:.*]] = llvm.mlir.constant(0 : i64) : i64
+//CHECK: %[[SRC_CAST:.*]] = llvm.bitcast %[[SRC_ARRAY]] : !llvm.ptr<struct<"t", (i32, array<10 x i8>)>> to !llvm.ptr<struct<"t", (i32, array<10 x i8>)>>
+//CHECK: %[[TMP_COMPONENT:.*]] = llvm.getelementptr %[[SRC_CAST]][%[[ZERO_6]], %[[COMPONENT_OFFSET_2]]] : (!llvm.ptr<struct<"t", (i32, array<10 x i8>)>>, i64, i32) -> !llvm.ptr<struct<"t", (i32, array<10 x i8>)>>
+//CHECK: %[[COMPONENT:.*]] = llvm.getelementptr %[[TMP_COMPONENT]][%[[COMPONENT_OFFSET_1]]] : (!llvm.ptr<struct<"t", (i32, array<10 x i8>)>>, i64) -> !llvm.ptr<struct<"t", (i32, array<10 x i8>)>>
+//CHECK: %[[COMPONENT_CAST:.*]] = llvm.bitcast %[[COMPONENT]] : !llvm.ptr<struct<"t", (i32, array<10 x i8>)>> to !llvm.ptr<i8>
+//CHECK: %[[SRC_LB:.*]] = llvm.mlir.constant(1 : i64) : i64
+//CHECK: %[[RESULT_TMP0:.*]] = llvm.sub %[[RESULT_LB]], %[[SRC_LB]] : i64
+//CHECK: %[[RESULT_OFFSET_START:.*]] = llvm.mul %[[RESULT_TMP0]], %[[SRC_STRIDE]] : i64
+//CHECK: %[[RESULT_PTR_I8:.*]] = llvm.getelementptr %[[COMPONENT_CAST]][%[[RESULT_OFFSET_START]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
+//CHECK: %[[RESULT_TMP1:.*]] = llvm.sub %[[RESULT_UB]], %[[RESULT_LB]] : i64
+//CHECK: %[[RESULT_TMP2:.*]] = llvm.add %[[RESULT_TMP1]], %[[RESULT_STRIDE]] : i64
+//CHECK: %[[RESULT_TMP3:.*]] = llvm.sdiv %[[RESULT_TMP2]], %[[RESULT_STRIDE]] : i64
+//CHECK: %[[RESULT_TMP_PRED:.*]] = llvm.icmp "sgt" %[[RESULT_TMP3]], %[[ZERO_6]] : i64
+//CHECK: %[[RESULT_NELEMS:.*]] = llvm.select %[[RESULT_TMP_PRED]], %[[RESULT_TMP3]], %[[ZERO_6]] : i1, i64
+//CHECK: %[[RESULT_TOTAL_STRIDE:.*]] = llvm.mul %[[RESULT_STRIDE]], %[[SRC_STRIDE]] : i64
+//CHECK: %[[RESULT_LB:.*]] = llvm.mlir.constant(1 : i64) : i64
+//CHECK: %[[RBOX_TMP7_1:.*]] = llvm.insertvalue %[[RESULT_LB]], %[[RBOX_TMP6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RBOX_TMP7_2:.*]] = llvm.insertvalue %[[RESULT_NELEMS]], %[[RBOX_TMP7_1]][7 : i32, 0 : i32, 1 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RBOX_TMP7_3:.*]] = llvm.insertvalue %[[RESULT_TOTAL_STRIDE]], %[[RBOX_TMP7_2]][7 : i32, 0 : i32, 2 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: %[[RESULT_PTR_CAST:.*]] = llvm.bitcast %[[RESULT_PTR_I8]] : !llvm.ptr<i8> to !llvm.ptr<i8>
+//CHECK: %[[RESULT_BOX:.*]] = llvm.insertvalue %[[RESULT_PTR_CAST]], %[[RBOX_TMP7_3]][0 : i32] : !llvm.struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
+//CHECK: llvm.store %[[RESULT_BOX]], %[[RESULT_BOX_REF]] : !llvm.ptr<struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>
+//CHECK: llvm.call @bar(%[[RESULT_BOX_REF]]) : (!llvm.ptr<struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>) -> ()
+//CHECK: llvm.return
+//CHECK: }