// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-///
-/// \file
-/// Implements lowering. Convert Fortran source to
-/// [MLIR](https://github.com/tensorflow/mlir).
-///
-/// [Coding style](https://llvm.org/docs/CodingStandards.html)
-///
+//
+// Coding style: https://mlir.llvm.org/getting_started/DeveloperGuide/
+//
//===----------------------------------------------------------------------===//
#ifndef FORTRAN_LOWER_BRIDGE_H
/// Create a folding context. Careful: this is very expensive.
Fortran::evaluate::FoldingContext createFoldingContext() const;
+ bool validModule() { return getModule(); }
+
//===--------------------------------------------------------------------===//
// Perform the creation of an mlir::ModuleOp
//===--------------------------------------------------------------------===//
//
// Utility that defines fir call interface for procedure both on caller and
// callee side and get the related FuncOp.
-// It does not emit any FIR code but for the created mlir::FuncOp, instead it
-// provides back a container of Symbol (callee side)/ActualArgument (caller
+// It does not emit any FIR code but for the created mlir::func::FuncOp, instead
+// it provides back a container of Symbol (callee side)/ActualArgument (caller
// side) with additional information for each element describing how it must be
-// plugged with the mlir::FuncOp.
+// plugged with the mlir::func::FuncOp.
// It handles the fact that hidden arguments may be inserted for the result
// while lowering.
//
class CallInterfaceImpl;
/// CallInterface defines all the logic to determine FIR function interfaces
-/// from a characteristic, build the mlir::FuncOp and describe back the argument
-/// mapping to its user.
+/// from a characteristic, build the mlir::func::FuncOp and describe back the
+/// argument mapping to its user.
/// The logic is shared between the callee and caller sides that it accepts as
/// a curiously recursive template to handle the few things that cannot be
/// shared between both sides (getting characteristics, mangled name, location).
using FirValue = typename PassedEntityTypes<T>::FirValue;
/// FirPlaceHolder are place holders for the mlir inputs and outputs that are
- /// created during the first pass before the mlir::FuncOp is created.
+ /// created during the first pass before the mlir::func::FuncOp is created.
struct FirPlaceHolder {
FirPlaceHolder(mlir::Type t, int passedPosition, Property p,
llvm::ArrayRef<mlir::NamedAttribute> attrs)
/// How entity is passed by.
PassEntityBy passBy;
/// What is the entity (SymbolRef for callee/ActualArgument* for caller)
- /// What is the related mlir::FuncOp argument(s) (mlir::Value for callee /
- /// index for the caller).
+ /// What is the related mlir::func::FuncOp argument(s) (mlir::Value for
+ /// callee / index for the caller).
FortranEntity entity;
FirValue firArgument;
FirValue firLength; /* only for AddressAndLength */
nullptr;
};
- /// Return the mlir::FuncOp. Note that front block is added by this
+ /// Return the mlir::func::FuncOp. Note that front block is added by this
/// utility if callee side.
- mlir::FuncOp getFuncOp() const { return func; }
+ mlir::func::FuncOp getFuncOp() const { return func; }
/// Number of MLIR inputs/outputs of the created FuncOp.
std::size_t getNumFIRArguments() const { return inputs.size(); }
std::size_t getNumFIRResults() const { return outputs.size(); }
llvm::SmallVector<mlir::Type> getResultType() const;
/// Return a container of Symbol/ActualArgument* and how they must
- /// be plugged with the mlir::FuncOp.
+ /// be plugged with the mlir::func::FuncOp.
llvm::ArrayRef<PassedEntity> getPassedArguments() const {
return passedArguments;
}
mlir::FunctionType genFunctionType();
/// determineInterface is the entry point of the first pass that defines the
- /// interface and is required to get the mlir::FuncOp.
+ /// interface and is required to get the mlir::func::FuncOp.
void
determineInterface(bool isImplicit,
const Fortran::evaluate::characteristics::Procedure &);
/// CRTP handle.
T &side() { return *static_cast<T *>(this); }
/// Entry point to be called by child ctor to analyze the signature and
- /// create/find the mlir::FuncOp. Child needs to be initialized first.
+ /// create/find the mlir::func::FuncOp. Child needs to be initialized first.
void declare();
- /// Second pass entry point, once the mlir::FuncOp is created.
+ /// Second pass entry point, once the mlir::func::FuncOp is created.
/// Nothing is done if it was already called.
void mapPassedEntities();
void mapBackInputToPassedEntity(const FirPlaceHolder &, FirValue);
llvm::SmallVector<FirPlaceHolder> outputs;
llvm::SmallVector<FirPlaceHolder> inputs;
- mlir::FuncOp func;
+ mlir::func::FuncOp func;
llvm::SmallVector<PassedEntity> passedArguments;
std::optional<PassedEntity> passedResult;
bool saveResult = false;
return procRef;
}
+ /// Get the SubprogramDetails that defines the interface of this call if it is
+ /// known at the call site. Return nullptr if it is not known.
+ const Fortran::semantics::SubprogramDetails *getInterfaceDetails() const;
+
bool isMainProgram() const { return false; }
/// Returns true if this is a call to a procedure pointer of a dummy
/// procedure.
const Fortran::semantics::Symbol *getProcedureSymbol() const;
- /// Add mlir::FuncOp entry block and map fir block arguments to Fortran dummy
- /// argument symbols.
- mlir::FuncOp addEntryBlockAndMapArguments();
+ /// Add mlir::func::FuncOp entry block and map fir block arguments to Fortran
+ /// dummy argument symbols.
+ mlir::func::FuncOp addEntryBlockAndMapArguments();
bool hasHostAssociated() const;
mlir::Type getHostAssociatedTy() const;
translateSignature(const Fortran::evaluate::ProcedureDesignator &,
Fortran::lower::AbstractConverter &);
-/// Declare or find the mlir::FuncOp named \p name. If the mlir::FuncOp does
-/// not exist yet, declare it with the signature translated from the
-/// ProcedureDesignator argument.
+/// Declare or find the mlir::func::FuncOp named \p name. If the
+/// mlir::func::FuncOp does not exist yet, declare it with the signature
+/// translated from the ProcedureDesignator argument.
/// Due to Fortran implicit function typing rules, the returned FuncOp is not
/// guaranteed to have the signature from ProcedureDesignator if the FuncOp was
/// already declared.
-mlir::FuncOp
+mlir::func::FuncOp
getOrDeclareFunction(llvm::StringRef name,
const Fortran::evaluate::ProcedureDesignator &,
Fortran::lower::AbstractConverter &);
namespace mlir {
class Location;
-}
+class Value;
+} // namespace mlir
-namespace Fortran::evaluate {
-template <typename>
-class Expr;
-struct SomeType;
-} // namespace Fortran::evaluate
+namespace fir {
+class AllocMemOp;
+class ArrayLoadOp;
+class ShapeOp;
+} // namespace fir
namespace Fortran::lower {
class AbstractConverter;
-class StatementContext;
-class SymMap;
class ExplicitIterSpace;
class ImplicitIterSpace;
class StatementContext;
-
-using SomeExpr = Fortran::evaluate::Expr<Fortran::evaluate::SomeType>;
+class SymMap;
/// Create an extended expression value.
fir::ExtendedValue createSomeExtendedExpression(mlir::Location loc,
AbstractConverter &converter,
const SomeExpr &expr, SymMap &symMap);
-/// Lower an array expression to a value of type box. The expression must be a
-/// variable.
-fir::ExtendedValue createSomeArrayBox(AbstractConverter &converter,
- const SomeExpr &expr, SymMap &symMap,
- StatementContext &stmtCtx);
-
-/// Lower a subroutine call. This handles both elemental and non elemental
-/// subroutines. \p isUserDefAssignment must be set if this is called in the
-/// context of a user defined assignment. For subroutines with alternate
-/// returns, the returned value indicates which label the code should jump to.
-/// The returned value is null otherwise.
-mlir::Value createSubroutineCall(AbstractConverter &converter,
- const evaluate::ProcedureRef &call,
- ExplicitIterSpace &explicitIterSpace,
- ImplicitIterSpace &implicitIterSpace,
- SymMap &symMap, StatementContext &stmtCtx,
- bool isUserDefAssignment);
-
-/// Create the address of the box.
-/// \p expr must be the designator of an allocatable/pointer entity.
-fir::MutableBoxValue createMutableBox(mlir::Location loc,
- AbstractConverter &converter,
- const SomeExpr &expr, SymMap &symMap);
-
/// Create a fir::BoxValue describing the value of \p expr.
/// If \p expr is a variable without vector subscripts, the fir::BoxValue
/// describes the variable storage. Otherwise, the created fir::BoxValue
ImplicitIterSpace &implicitIterSpace,
SymMap &symMap, StatementContext &stmtCtx);
+/// In the context of a FORALL, a pointer assignment is allowed. The pointer
+/// assignment can be elementwise on an array of pointers. The bounds
+/// expressions as well as the component path may contain references to the
+/// concurrent control variables. The explicit iteration space must be defined.
+void createAnyArrayPointerAssignment(
+ AbstractConverter &converter, const SomeExpr &lhs, const SomeExpr &rhs,
+ const evaluate::Assignment::BoundsSpec &bounds,
+ ExplicitIterSpace &explicitIterSpace, ImplicitIterSpace &implicitIterSpace,
+ SymMap &symMap);
+/// Support the bounds remapping flavor of pointer assignment.
+void createAnyArrayPointerAssignment(
+ AbstractConverter &converter, const SomeExpr &lhs, const SomeExpr &rhs,
+ const evaluate::Assignment::BoundsRemapping &bounds,
+ ExplicitIterSpace &explicitIterSpace, ImplicitIterSpace &implicitIterSpace,
+ SymMap &symMap);
+
/// Lower an assignment to an allocatable array, allocating the array if
/// it is not allocated yet or reallocating it if it does not conform
/// with the right hand side.
const SomeExpr &expr, mlir::Value raggedHeader,
SymMap &symMap, StatementContext &stmtCtx);
+/// Lower an array expression to a value of type box. The expression must be a
+/// variable.
+fir::ExtendedValue createSomeArrayBox(AbstractConverter &converter,
+ const SomeExpr &expr, SymMap &symMap,
+ StatementContext &stmtCtx);
+
+/// Lower a subroutine call. This handles both elemental and non elemental
+/// subroutines. \p isUserDefAssignment must be set if this is called in the
+/// context of a user defined assignment. For subroutines with alternate
+/// returns, the returned value indicates which label the code should jump to.
+/// The returned value is null otherwise.
+mlir::Value createSubroutineCall(AbstractConverter &converter,
+ const evaluate::ProcedureRef &call,
+ ExplicitIterSpace &explicitIterSpace,
+ ImplicitIterSpace &implicitIterSpace,
+ SymMap &symMap, StatementContext &stmtCtx,
+ bool isUserDefAssignment);
+
// Attribute for an alloca that is a trivial adaptor for converting a value to
// pass-by-ref semantics for a VALUE parameter. The optimizer may be able to
// eliminate these.
mlir::Value genMax(fir::FirOpBuilder &, mlir::Location,
llvm::ArrayRef<mlir::Value> args);
+/// Generate minimum. Same constraints as genMax.
+mlir::Value genMin(fir::FirOpBuilder &, mlir::Location,
+ llvm::ArrayRef<mlir::Value> args);
+
/// Generate power function x**y with the given expected
/// result type.
mlir::Value genPow(fir::FirOpBuilder &, mlir::Location, mlir::Type resultType,
#define FORTRAN_OPTIMIZER_BUILDER_CHARACTER_H
#include "flang/Optimizer/Builder/BoxValue.h"
-#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/LowLevelIntrinsics.h"
+
+namespace fir {
+class FirOpBuilder;
+}
namespace fir::factory {
class CharacterExprHelper {
public:
/// Constructor.
- explicit CharacterExprHelper(fir::FirOpBuilder &builder, mlir::Location loc)
+ explicit CharacterExprHelper(FirOpBuilder &builder, mlir::Location loc)
: builder{builder}, loc{loc} {}
CharacterExprHelper(const CharacterExprHelper &) = delete;
/// Extract the kind of a character or array of character type.
static fir::KindTy getCharacterOrSequenceKind(mlir::Type type);
+ // TODO: Do we really need all these flavors of unwrapping to get the fir.char
+ // type? Or can we merge these? It would be better to merge them and eliminate
+ // the confusion.
+
/// Determine the inner character type. Unwraps references, boxes, and
/// sequences to find the !fir.char element type.
static fir::CharacterType getCharType(mlir::Type type);
- /// Determine the base character type
+ /// Get fir.char<kind> type with the same kind as inside str.
static fir::CharacterType getCharacterType(mlir::Type type);
static fir::CharacterType getCharacterType(const fir::CharBoxValue &box);
static fir::CharacterType getCharacterType(mlir::Value str);
void createAssign(const fir::CharBoxValue &lhs, const fir::CharBoxValue &rhs);
mlir::Value createBlankConstantCode(fir::CharacterType type);
+private:
FirOpBuilder &builder;
mlir::Location loc;
};
-// FIXME: Move these to Optimizer
-mlir::FuncOp getLlvmMemcpy(FirOpBuilder &builder);
-mlir::FuncOp getLlvmMemmove(FirOpBuilder &builder);
-mlir::FuncOp getLlvmMemset(FirOpBuilder &builder);
-mlir::FuncOp getRealloc(FirOpBuilder &builder);
-
//===----------------------------------------------------------------------===//
// Tools to work with Character dummy procedures
//===----------------------------------------------------------------------===//
/// one provided by \p funcPointerType.
mlir::Type getCharacterProcedureTupleType(mlir::Type funcPointerType);
-/// Is this tuple type holding a character function and its result length ?
-bool isCharacterProcedureTuple(mlir::Type type);
-
-/// Is \p tuple a value holding a character function address and its result
-/// length ?
-inline bool isCharacterProcedureTuple(mlir::Value tuple) {
- return isCharacterProcedureTuple(tuple.getType());
-}
-
/// Create a tuple<addr, len> given \p addr and \p len as well as the tuple
/// type \p argTy. \p addr must be any function address, and \p len must be
/// any integer. Converts will be inserted if needed if \addr and \p len
#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Dialect/FIRType.h"
#include "flang/Optimizer/Support/KindMapping.h"
-#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
namespace fir {
class AbstractArrayBox;
return mlir::SymbolRefAttr::get(getContext(), str);
}
- /// Get the mlir real type that implements fortran REAL(kind).
+ /// Get the mlir float type that implements Fortran REAL(kind).
mlir::Type getRealType(int kind);
fir::BoxProcType getBoxProcType(mlir::FunctionType funcTy) {
mlir::FuncOp getNamedFunction(llvm::StringRef name) {
return getNamedFunction(getModule(), name);
}
-
static mlir::FuncOp getNamedFunction(mlir::ModuleOp module,
llvm::StringRef name);
mlir::Value ub, mlir::Value step,
mlir::Type type);
+ /// Dump the current function. (debug)
+ LLVM_DUMP_METHOD void dumpFunc();
+
private:
const KindMapping &kindMap;
};
mlir::Location loc,
fir::SequenceType seqTy);
-//===----------------------------------------------------------------------===//
+//===--------------------------------------------------------------------===//
// Location helpers
-//===----------------------------------------------------------------------===//
+//===--------------------------------------------------------------------===//
/// Generate a string literal containing the file name and return its address
mlir::Value locationToFilename(fir::FirOpBuilder &, mlir::Location);
-
/// Generate a constant of the given type with the location line number
mlir::Value locationToLineNo(fir::FirOpBuilder &, mlir::Location, mlir::Type);
-/// Builds and returns the type of a ragged array header used to cache mask
-/// evaluations. RaggedArrayHeader is defined in
-/// flang/include/flang/Runtime/ragged.h.
-mlir::TupleType getRaggedArrayHeaderType(fir::FirOpBuilder &builder);
-
-/// Create the zero value of a given the numerical or logical \p type (`false`
-/// for logical types).
-mlir::Value createZeroValue(fir::FirOpBuilder &builder, mlir::Location loc,
- mlir::Type type);
-
//===--------------------------------------------------------------------===//
// ExtendedValue helpers
//===--------------------------------------------------------------------===//
const fir::ExtendedValue &lhs,
const fir::ExtendedValue &rhs);
+/// Builds and returns the type of a ragged array header used to cache mask
+/// evaluations. RaggedArrayHeader is defined in
+/// flang/include/flang/Runtime/ragged.h.
+mlir::TupleType getRaggedArrayHeaderType(fir::FirOpBuilder &builder);
+
/// Generate the, possibly dynamic, LEN of a CHARACTER. \p arrLoad determines
/// the base array. After applying \p path, the result must be a reference to a
/// `!fir.char` type object. \p substring must have 0, 1, or 2 members. The
llvm::ArrayRef<mlir::Value> path,
llvm::ArrayRef<mlir::Value> substring);
+/// Create the zero value of the given numerical or logical \p type (`false`
+/// for logical types).
+mlir::Value createZeroValue(fir::FirOpBuilder &builder, mlir::Location loc,
+ mlir::Type type);
+
} // namespace fir::factory
#endif // FORTRAN_OPTIMIZER_BUILDER_FIRBUILDER_H
namespace fir::factory {
+/// Get the LLVM intrinsic for `memcpy`. Use the 64 bit version.
+mlir::func::FuncOp getLlvmMemcpy(FirOpBuilder &builder);
+
+/// Get the LLVM intrinsic for `memmove`. Use the 64 bit version.
+mlir::func::FuncOp getLlvmMemmove(FirOpBuilder &builder);
+
+/// Get the LLVM intrinsic for `memset`. Use the 64 bit version.
+mlir::func::FuncOp getLlvmMemset(FirOpBuilder &builder);
+
+/// Get the C standard library `realloc` function.
+mlir::func::FuncOp getRealloc(FirOpBuilder &builder);
+
/// Get the `llvm.stacksave` intrinsic.
mlir::func::FuncOp getLlvmStackSave(FirOpBuilder &builder);
/// Get the `llvm.stackrestore` intrinsic.
mlir::func::FuncOp getLlvmStackRestore(FirOpBuilder &builder);
+/// Get the `llvm.init.trampoline` intrinsic.
+mlir::func::FuncOp getLlvmInitTrampoline(FirOpBuilder &builder);
+
+/// Get the `llvm.adjust.trampoline` intrinsic.
+mlir::func::FuncOp getLlvmAdjustTrampoline(FirOpBuilder &builder);
+
} // namespace fir::factory
#endif // FLANG_OPTIMIZER_BUILDER_LOWLEVELINTRINSICS_H
];
}
+def BoxedProcedurePass : Pass<"boxed-procedure", "mlir::ModuleOp"> {
+ let constructor = "::fir::createBoxedProcedurePass()";
+ let options = [
+ Option<"useThunks", "use-thunks",
+ "bool", /*default=*/"true",
+ "Convert procedure pointer abstractions to a single code pointer, "
+ "deploying thunks wherever required.">
+ ];
+}
+
#endif // FORTRAN_OPTIMIZER_CODEGEN_FIR_PASSES
using LLVMIRLoweringPrinter =
std::function<void(llvm::Module &, llvm::raw_ostream &)>;
+
/// Convert the LLVM IR dialect to LLVM-IR proper
std::unique_ptr<mlir::Pass> createLLVMDialectToLLVMPass(
llvm::raw_ostream &output,
LLVMIRLoweringPrinter printer =
[](llvm::Module &m, llvm::raw_ostream &out) { m.print(out, nullptr); });
+/// Convert boxproc values to a lower level representation. The default is to
+/// use function pointers and thunks.
+std::unique_ptr<mlir::Pass> createBoxedProcedurePass();
+std::unique_ptr<mlir::Pass> createBoxedProcedurePass(bool useThunks);
+
// declarative passes
#define GEN_PASS_REGISTRATION
#include "flang/Optimizer/CodeGen/CGPasses.h.inc"
then the form takes only the procedure's symbol.
```mlir
- %0 = fir.emboxproc @f : ((i32) -> i32) -> !fir.boxproc<(i32) -> i32>
+ %f = ... : (i32) -> i32
+ %0 = fir.emboxproc %f : ((i32) -> i32) -> !fir.boxproc<(i32) -> i32>
```
An internal procedure requiring a host instance for correct execution uses
promotion of local values.
```mlir
- %4 = ... : !fir.ref<tuple<i32, i32>>
- %5 = fir.emboxproc @g, %4 : ((i32) -> i32, !fir.ref<tuple<i32, i32>>) -> !fir.boxproc<(i32) -> i32>
+ %4 = ... : !fir.ref<tuple<!fir.ref<i32>, !fir.ref<i32>>>
+ %g = ... : (i32) -> i32
+ %5 = fir.emboxproc %g, %4 : ((i32) -> i32, !fir.ref<tuple<!fir.ref<i32>, !fir.ref<i32>>>) -> !fir.boxproc<(i32) -> i32>
```
}];
- let arguments = (ins SymbolRefAttr:$funcname, AnyReferenceLike:$host);
+ let arguments = (ins FuncType:$func, Optional<fir_ReferenceType>:$host);
let results = (outs fir_BoxProcType);
- let hasCustomAssemblyFormat = 1;
+ let assemblyFormat = [{
+ $func (`,` $host^)? attr-dict `:` functional-type(operands, results)
+ }];
+
let hasVerifier = 1;
}
```mlir
%51 = fir.box_addr %box : (!fir.box<f64>) -> !fir.ref<f64>
%52 = fir.box_addr %boxchar : (!fir.boxchar<1>) -> !fir.ref<!fir.char<1>>
- %53 = fir.box_addr %boxproc : (!fir.boxproc<!P>) -> !fir.ref<!P>
+ %53 = fir.box_addr %boxproc : (!fir.boxproc<!P>) -> !P
```
}];
- let arguments = (ins fir_BoxType:$val);
+ let arguments = (ins AnyBoxLike:$val);
- let results = (outs AnyReferenceLike);
+ let results = (outs AnyCodeOrDataRefLike);
let hasFolder = 1;
}
namespace fir {
-/// return true iff the Operation is a non-volatile LoadOp
+/// Return true iff the Operation is a non-volatile LoadOp or ArrayLoadOp.
inline bool nonVolatileLoad(mlir::Operation *op) {
if (auto load = mlir::dyn_cast<fir::LoadOp>(op))
return !load->getAttr("volatile");
+ if (auto arrLoad = mlir::dyn_cast<fir::ArrayLoadOp>(op))
+ return !arrLoad->getAttr("volatile");
return false;
}
-/// return true iff the Operation is a call
+/// Return true iff the Operation is a call.
inline bool isaCall(mlir::Operation *op) {
- return mlir::isa<fir::CallOp>(op) || llvm::isa<fir::DispatchOp>(op) ||
+ return mlir::isa<fir::CallOp>(op) || mlir::isa<fir::DispatchOp>(op) ||
mlir::isa<mlir::func::CallOp>(op) ||
mlir::isa<mlir::func::CallIndirectOp>(op);
}
/// Is `t` a type that can conform to be pass-by-reference? Depending on the
/// context, these types may simply demote to pass-by-reference or a reference
-/// to them may have to be passed instead.
+/// to them may have to be passed instead. Functions are always referent.
inline bool conformsWithPassByRef(mlir::Type t) {
- return isa_ref_type(t) || isa_box_type(t);
+ return isa_ref_type(t) || isa_box_type(t) || t.isa<mlir::FunctionType>();
}
/// Is `t` a derived (record) type?
/// Returns true iff the type `t` does not have a constant size.
bool hasDynamicSize(mlir::Type t);
+inline unsigned getRankOfShapeType(mlir::Type t) {
+ if (auto shTy = t.dyn_cast<fir::ShapeType>())
+ return shTy.getRank();
+ if (auto shTy = t.dyn_cast<fir::ShapeShiftType>())
+ return shTy.getRank();
+ if (auto shTy = t.dyn_cast<fir::ShiftType>())
+ return shTy.getRank();
+ return 0;
+}
+
/// If `t` is a SequenceType return its element type, otherwise return `t`.
inline mlir::Type unwrapSequenceType(mlir::Type t) {
if (auto seqTy = t.dyn_cast<fir::SequenceType>())
return t;
}
+/// Unwrap all pointer and box types and return the element type if it is a
+/// sequence type, otherwise return null.
+inline fir::SequenceType unwrapUntilSeqType(mlir::Type t) {
+ while (true) {
+ if (!t)
+ return {};
+ if (auto ty = dyn_cast_ptrOrBoxEleTy(t)) {
+ t = ty;
+ continue;
+ }
+ if (auto seqTy = t.dyn_cast<fir::SequenceType>())
+ return seqTy;
+ return {};
+ }
+}
+
#ifndef NDEBUG
// !fir.ptr<X> and !fir.heap<X> where X is !fir.ptr, !fir.heap, or !fir.ref
// is undefined and disallowed.
fir_HeapType.predicate, fir_PointerType.predicate,
fir_LLVMPointerType.predicate]>, "any reference">;
+def FuncType : TypeConstraint<FunctionType.predicate, "function type">;
+
+def AnyCodeOrDataRefLike : TypeConstraint<Or<[AnyReferenceLike.predicate,
+ FunctionType.predicate]>, "any code or data reference">;
+
def RefOrLLVMPtr : TypeConstraint<Or<[fir_ReferenceType.predicate,
fir_LLVMPointerType.predicate]>, "fir.ref or fir.llvm_ptr">;
DisableOption(TargetRewrite, "target-rewrite", "rewrite FIR for target");
DisableOption(FirToLlvmIr, "fir-to-llvmir", "FIR to LLVM-IR dialect");
DisableOption(LlvmIrToLlvm, "llvm", "conversion to LLVM");
+DisableOption(BoxedProcedureRewrite, "boxed-procedure-rewrite",
+ "rewrite boxed procedures");
#endif
/// Generic for adding a pass to the pass manager if it is not disabled.
addPassConditionally(pm, disableLlvmIrToLlvm,
[&]() { return fir::createLLVMDialectToLLVMPass(output); });
}
+
+inline void addBoxedProcedurePass(mlir::PassManager &pm) {
+ addPassConditionally(pm, disableBoxedProcedureRewrite,
+ [&]() { return fir::createBoxedProcedurePass(); });
+}
#endif
/// Create a pass pipeline for running default optimization passes for
#if !defined(FLANG_EXCLUDE_CODEGEN)
inline void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm) {
+ fir::addBoxedProcedurePass(pm);
pm.addNestedPass<mlir::FuncOp>(fir::createAbstractResultOptPass());
fir::addCodeGenRewritePass(pm);
fir::addTargetRewritePass(pm);
//===----------------------------------------------------------------------===//
#include "flang/Lower/Bridge.h"
-#include "flang/Evaluate/tools.h"
#include "flang/Lower/Allocatable.h"
#include "flang/Lower/CallInterface.h"
+#include "flang/Lower/Coarray.h"
#include "flang/Lower/ConvertExpr.h"
#include "flang/Lower/ConvertType.h"
#include "flang/Lower/ConvertVariable.h"
+#include "flang/Lower/HostAssociations.h"
#include "flang/Lower/IO.h"
#include "flang/Lower/IterationSpace.h"
#include "flang/Lower/Mangler.h"
+#include "flang/Lower/OpenACC.h"
#include "flang/Lower/OpenMP.h"
#include "flang/Lower/PFTBuilder.h"
#include "flang/Lower/Runtime.h"
#include "flang/Lower/StatementContext.h"
-#include "flang/Lower/SymbolMap.h"
+#include "flang/Lower/Support/Utils.h"
#include "flang/Lower/Todo.h"
#include "flang/Optimizer/Builder/BoxValue.h"
#include "flang/Optimizer/Builder/Character.h"
-#include "flang/Optimizer/Builder/MutableBox.h"
+#include "flang/Optimizer/Builder/FIRBuilder.h"
#include "flang/Optimizer/Builder/Runtime/Character.h"
#include "flang/Optimizer/Builder/Runtime/Ragged.h"
#include "flang/Optimizer/Dialect/FIRAttr.h"
+#include "flang/Optimizer/Dialect/FIRDialect.h"
+#include "flang/Optimizer/Dialect/FIROps.h"
#include "flang/Optimizer/Support/FIRContext.h"
+#include "flang/Optimizer/Support/FatalError.h"
#include "flang/Optimizer/Support/InternalNames.h"
+#include "flang/Optimizer/Transforms/Passes.h"
+#include "flang/Parser/parse-tree.h"
#include "flang/Runtime/iostat.h"
#include "flang/Semantics/tools.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
#include "mlir/IR/PatternMatch.h"
+#include "mlir/Parser/Parser.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
#define DEBUG_TYPE "flang-lower-bridge"
-using namespace mlir;
-
static llvm::cl::opt<bool> dumpBeforeFir(
"fdebug-dump-pre-fir", llvm::cl::init(false),
llvm::cl::desc("dump the Pre-FIR tree prior to FIR generation"));
+static llvm::cl::opt<bool> forceLoopToExecuteOnce(
+ "always-execute-loop-body", llvm::cl::init(false),
+ llvm::cl::desc("force the body of a loop to execute at least once"));
+
namespace {
/// Helper class to generate the runtime type info global data. This data
/// is required to describe the derived type to the runtime so that it can
/// creation.
llvm::SmallSetVector<Fortran::semantics::SymbolRef, 64> seen;
};
+
} // namespace
//===----------------------------------------------------------------------===//
[&](Fortran::lower::pft::FunctionLikeUnit &f) { lowerFunc(f); },
[&](Fortran::lower::pft::ModuleLikeUnit &m) { lowerMod(m); },
[&](Fortran::lower::pft::BlockDataUnit &b) {},
- [&](Fortran::lower::pft::CompilerDirectiveUnit &d) {},
+ [&](Fortran::lower::pft::CompilerDirectiveUnit &d) {
+ setCurrentPosition(
+ d.get<Fortran::parser::CompilerDirective>().source);
+ mlir::emitWarning(toLocation(),
+ "ignoring all compiler directives");
+ },
},
u);
}
fir::ExtendedValue genExprAddr(const Fortran::lower::SomeExpr &expr,
Fortran::lower::StatementContext &context,
mlir::Location *loc = nullptr) override final {
- return createSomeExtendedAddress(loc ? *loc : toLocation(), *this, expr,
- localSymbols, context);
+ return Fortran::lower::createSomeExtendedAddress(
+ loc ? *loc : toLocation(), *this, expr, localSymbols, context);
}
fir::ExtendedValue
genExprValue(const Fortran::lower::SomeExpr &expr,
Fortran::lower::StatementContext &context,
mlir::Location *loc = nullptr) override final {
- return createSomeExtendedExpression(loc ? *loc : toLocation(), *this, expr,
- localSymbols, context);
+ return Fortran::lower::createSomeExtendedExpression(
+ loc ? *loc : toLocation(), *this, expr, localSymbols, context);
}
fir::MutableBoxValue
genExprMutableBox(mlir::Location loc,
mlir::Type genType(const Fortran::lower::SomeExpr &expr) override final {
return Fortran::lower::translateSomeExprToFIRType(*this, expr);
}
+ mlir::Type genType(const Fortran::lower::pft::Variable &var) override final {
+ return Fortran::lower::translateVariableToFIRType(*this, var);
+ }
mlir::Type genType(Fortran::lower::SymbolRef sym) override final {
return Fortran::lower::translateSymbolToFIRType(*this, sym);
}
return Fortran::lower::translateDerivedTypeToFIRType(*this, tySpec);
}
mlir::Type genType(Fortran::common::TypeCategory tc) override final {
- TODO_NOLOC("Not implemented genType TypeCategory. Needed for more complex "
- "expression lowering");
- }
- mlir::Type genType(const Fortran::lower::pft::Variable &var) override final {
- return Fortran::lower::translateVariableToFIRType(*this, var);
- }
-
- void setCurrentPosition(const Fortran::parser::CharBlock &position) {
- if (position != Fortran::parser::CharBlock{})
- currentPosition = position;
- }
-
- //===--------------------------------------------------------------------===//
- // Utility methods
- //===--------------------------------------------------------------------===//
-
- /// Convert a parser CharBlock to a Location
- mlir::Location toLocation(const Fortran::parser::CharBlock &cb) {
- return genLocation(cb);
- }
-
- mlir::Location toLocation() { return toLocation(currentPosition); }
- void setCurrentEval(Fortran::lower::pft::Evaluation &eval) {
- evalPtr = &eval;
- }
- Fortran::lower::pft::Evaluation &getEval() {
- assert(evalPtr && "current evaluation not set");
- return *evalPtr;
+ return Fortran::lower::getFIRType(
+ &getMLIRContext(), tc, bridge.getDefaultKinds().GetDefaultKind(tc),
+ llvm::None);
}
mlir::Location getCurrentLocation() override final { return toLocation(); }
return bridge.getKindMap();
}
- /// Return the predicate: "current block does not have a terminator branch".
- bool blockIsUnterminated() {
- mlir::Block *currentBlock = builder->getBlock();
- return currentBlock->empty() ||
- !currentBlock->back().hasTrait<mlir::OpTrait::IsTerminator>();
+ mlir::Value hostAssocTupleValue() override final { return hostAssocTuple; }
+
+ /// Record a binding for the ssa-value of the tuple for this function.
+ void bindHostAssocTuple(mlir::Value val) override final {
+ assert(!hostAssocTuple && val);
+ hostAssocTuple = val;
}
- /// Unconditionally switch code insertion to a new block.
- void startBlock(mlir::Block *newBlock) {
- assert(newBlock && "missing block");
- // Default termination for the current block is a fallthrough branch to
- // the new block.
- if (blockIsUnterminated())
- genFIRBranch(newBlock);
- // Some blocks may be re/started more than once, and might not be empty.
- // If the new block already has (only) a terminator, set the insertion
- // point to the start of the block. Otherwise set it to the end.
- // Note that setting the insertion point causes the subsequent function
- // call to check the existence of terminator in the newBlock.
- builder->setInsertionPointToStart(newBlock);
- if (blockIsUnterminated())
- builder->setInsertionPointToEnd(newBlock);
+ void registerRuntimeTypeInfo(
+ mlir::Location loc,
+ Fortran::lower::SymbolRef typeInfoSym) override final {
+ runtimeTypeInfoConverter.registerTypeInfoSymbol(*this, loc, typeInfoSym);
}
- /// Conditionally switch code insertion to a new block.
- void maybeStartBlock(mlir::Block *newBlock) {
- if (newBlock)
- startBlock(newBlock);
+private:
+ FirConverter() = delete;
+ FirConverter(const FirConverter &) = delete;
+ FirConverter &operator=(const FirConverter &) = delete;
+
+ //===--------------------------------------------------------------------===//
+ // Helper member functions
+ //===--------------------------------------------------------------------===//
+
+ mlir::Value createFIRExpr(mlir::Location loc,
+ const Fortran::lower::SomeExpr *expr,
+ Fortran::lower::StatementContext &stmtCtx) {
+ return fir::getBase(genExprValue(*expr, stmtCtx, &loc));
}
- /// Emit return and cleanup after the function has been translated.
- void endNewFunction(Fortran::lower::pft::FunctionLikeUnit &funit) {
- setCurrentPosition(Fortran::lower::pft::stmtSourceLoc(funit.endStmt));
- if (funit.isMainProgram())
- genExitRoutine();
- else
- genFIRProcedureExit(funit, funit.getSubprogramSymbol());
- funit.finalBlock = nullptr;
- LLVM_DEBUG(llvm::dbgs() << "*** Lowering result:\n\n"
- << *builder->getFunction() << '\n');
- // FIXME: Simplification should happen in a normal pass, not here.
- mlir::IRRewriter rewriter(*builder);
- (void)mlir::simplifyRegions(rewriter,
- {builder->getRegion()}); // remove dead code
- delete builder;
- builder = nullptr;
- hostAssocTuple = mlir::Value{};
- localSymbols.clear();
+ /// Find the symbol in the local map or return null.
+ Fortran::lower::SymbolBox
+ lookupSymbol(const Fortran::semantics::Symbol &sym) {
+ if (Fortran::lower::SymbolBox v = localSymbols.lookupSymbol(sym))
+ return v;
+ return {};
}
- /// Helper to generate GlobalOps when the builder is not positioned in any
- /// region block. This is required because the FirOpBuilder assumes it is
- /// always positioned inside a region block when creating globals, the easiest
- /// way comply is to create a dummy function and to throw it afterwards.
- void createGlobalOutsideOfFunctionLowering(
- const std::function<void()> &createGlobals) {
- // FIXME: get rid of the bogus function context and instantiate the
- // globals directly into the module.
- MLIRContext *context = &getMLIRContext();
- mlir::FuncOp func = fir::FirOpBuilder::createFunction(
- mlir::UnknownLoc::get(context), getModuleOp(),
- fir::NameUniquer::doGenerated("Sham"),
- mlir::FunctionType::get(context, llvm::None, llvm::None));
- func.addEntryBlock();
- builder = new fir::FirOpBuilder(func, bridge.getKindMap());
- createGlobals();
- if (mlir::Region *region = func.getCallableRegion())
- region->dropAllReferences();
- func.erase();
- delete builder;
- builder = nullptr;
- localSymbols.clear();
+ /// Find the symbol in the inner-most level of the local map or return null.
+ Fortran::lower::SymbolBox
+ shallowLookupSymbol(const Fortran::semantics::Symbol &sym) {
+ if (Fortran::lower::SymbolBox v = localSymbols.shallowLookupSymbol(sym))
+ return v;
+ return {};
}
- /// Instantiate the data from a BLOCK DATA unit.
- void lowerBlockData(Fortran::lower::pft::BlockDataUnit &bdunit) {
- createGlobalOutsideOfFunctionLowering([&]() {
- Fortran::lower::AggregateStoreMap fakeMap;
- for (const auto &[_, sym] : bdunit.symTab) {
- if (sym->has<Fortran::semantics::ObjectEntityDetails>()) {
- Fortran::lower::pft::Variable var(*sym, true);
- instantiateVar(var, fakeMap);
- }
- }
- });
+
+ /// Add the symbol to the local map and return `true`. If the symbol is
+ /// already in the map and \p forced is `false`, the map is not updated.
+ /// Instead the value `false` is returned.
+ bool addSymbol(const Fortran::semantics::SymbolRef sym, mlir::Value val,
+ bool forced = false) {
+ if (!forced && lookupSymbol(sym))
+ return false;
+ localSymbols.addSymbol(sym, val, forced);
+ return true;
}
- /// Map mlir function block arguments to the corresponding Fortran dummy
- /// variables. When the result is passed as a hidden argument, the Fortran
- /// result is also mapped. The symbol map is used to hold this mapping.
- void mapDummiesAndResults(Fortran::lower::pft::FunctionLikeUnit &funit,
- const Fortran::lower::CalleeInterface &callee) {
- assert(builder && "require a builder object at this point");
- using PassBy = Fortran::lower::CalleeInterface::PassEntityBy;
- auto mapPassedEntity = [&](const auto arg) -> void {
- if (arg.passBy == PassBy::AddressAndLength) {
- // TODO: now that fir call has some attributes regarding character
- // return, PassBy::AddressAndLength should be retired.
- mlir::Location loc = toLocation();
- fir::factory::CharacterExprHelper charHelp{*builder, loc};
- mlir::Value box =
- charHelp.createEmboxChar(arg.firArgument, arg.firLength);
- addSymbol(arg.entity->get(), box);
- } else {
- if (arg.entity.has_value()) {
- addSymbol(arg.entity->get(), arg.firArgument);
- } else {
- assert(funit.parentHasHostAssoc());
- funit.parentHostAssoc().internalProcedureBindings(*this,
- localSymbols);
- }
- }
- };
- for (const Fortran::lower::CalleeInterface::PassedEntity &arg :
- callee.getPassedArguments())
- mapPassedEntity(arg);
+ bool addCharSymbol(const Fortran::semantics::SymbolRef sym, mlir::Value val,
+ mlir::Value len, bool forced = false) {
+ if (!forced && lookupSymbol(sym))
+ return false;
+ // TODO: ensure val type is fir.array<len x fir.char<kind>> like. Insert
+ // cast if needed.
+ localSymbols.addCharSymbol(sym, val, len, forced);
+ return true;
+ }
- // Allocate local skeleton instances of dummies from other entry points.
- // Most of these locals will not survive into final generated code, but
- // some will. It is illegal to reference them at run time if they do.
- for (const Fortran::semantics::Symbol *arg :
- funit.nonUniversalDummyArguments) {
- if (lookupSymbol(*arg))
- continue;
- mlir::Type type = genType(*arg);
- // TODO: Account for VALUE arguments (and possibly other variants).
- type = builder->getRefType(type);
- addSymbol(*arg, builder->create<fir::UndefOp>(toLocation(), type));
- }
- if (std::optional<Fortran::lower::CalleeInterface::PassedEntity>
- passedResult = callee.getPassedResult()) {
- mapPassedEntity(*passedResult);
- // FIXME: need to make sure things are OK here. addSymbol may not be OK
- if (funit.primaryResult &&
- passedResult->entity->get() != *funit.primaryResult)
- addSymbol(*funit.primaryResult,
- getSymbolAddress(passedResult->entity->get()));
- }
+ fir::ExtendedValue getExtendedValue(Fortran::lower::SymbolBox sb) {
+ return sb.match(
+ [&](const Fortran::lower::SymbolBox::PointerOrAllocatable &box) {
+ return fir::factory::genMutableBoxRead(*builder, getCurrentLocation(),
+ box);
+ },
+ [&sb](auto &) { return sb.toExtendedValue(); });
}
- /// Instantiate variable \p var and add it to the symbol map.
- /// See ConvertVariable.cpp.
- void instantiateVar(const Fortran::lower::pft::Variable &var,
- Fortran::lower::AggregateStoreMap &storeMap) {
- Fortran::lower::instantiateVariable(*this, var, localSymbols, storeMap);
+ static bool isNumericScalarCategory(Fortran::common::TypeCategory cat) {
+ return cat == Fortran::common::TypeCategory::Integer ||
+ cat == Fortran::common::TypeCategory::Real ||
+ cat == Fortran::common::TypeCategory::Complex ||
+ cat == Fortran::common::TypeCategory::Logical;
+ }
+ static bool isLogicalCategory(Fortran::common::TypeCategory cat) {
+ return cat == Fortran::common::TypeCategory::Logical;
+ }
+ static bool isCharacterCategory(Fortran::common::TypeCategory cat) {
+ return cat == Fortran::common::TypeCategory::Character;
+ }
+ static bool isDerivedCategory(Fortran::common::TypeCategory cat) {
+ return cat == Fortran::common::TypeCategory::Derived;
}
- /// Prepare to translate a new function
- void startNewFunction(Fortran::lower::pft::FunctionLikeUnit &funit) {
- assert(!builder && "expected nullptr");
- Fortran::lower::CalleeInterface callee(funit, *this);
- mlir::FuncOp func = callee.addEntryBlockAndMapArguments();
- func.setVisibility(mlir::SymbolTable::Visibility::Public);
- builder = new fir::FirOpBuilder(func, bridge.getKindMap());
- assert(builder && "FirOpBuilder did not instantiate");
- builder->setInsertionPointToStart(&func.front());
+ /// Insert a new block before \p block. Leave the insertion point unchanged.
+ mlir::Block *insertBlock(mlir::Block *block) {
+ mlir::OpBuilder::InsertPoint insertPt = builder->saveInsertionPoint();
+ mlir::Block *newBlock = builder->createBlock(block);
+ builder->restoreInsertionPoint(insertPt);
+ return newBlock;
+ }
- mapDummiesAndResults(funit, callee);
+ mlir::Block *blockOfLabel(Fortran::lower::pft::Evaluation &eval,
+ Fortran::parser::Label label) {
+ const Fortran::lower::pft::LabelEvalMap &labelEvaluationMap =
+ eval.getOwningProcedure()->labelEvaluationMap;
+ const auto iter = labelEvaluationMap.find(label);
+ assert(iter != labelEvaluationMap.end() && "label missing from map");
+ mlir::Block *block = iter->second->block;
+ assert(block && "missing labeled evaluation block");
+ return block;
+ }
- // Note: not storing Variable references because getOrderedSymbolTable
- // below returns a temporary.
- llvm::SmallVector<Fortran::lower::pft::Variable> deferredFuncResultList;
-
- // Backup actual argument for entry character results
- // with different lengths. It needs to be added to the non
- // primary results symbol before mapSymbolAttributes is called.
- Fortran::lower::SymbolBox resultArg;
- if (std::optional<Fortran::lower::CalleeInterface::PassedEntity>
- passedResult = callee.getPassedResult())
- resultArg = lookupSymbol(passedResult->entity->get());
-
- Fortran::lower::AggregateStoreMap storeMap;
- // The front-end is currently not adding module variables referenced
- // in a module procedure as host associated. As a result we need to
- // instantiate all module variables here if this is a module procedure.
- // It is likely that the front-end behavior should change here.
- // This also applies to internal procedures inside module procedures.
- if (auto *module = Fortran::lower::pft::getAncestor<
- Fortran::lower::pft::ModuleLikeUnit>(funit))
- for (const Fortran::lower::pft::Variable &var :
- module->getOrderedSymbolTable())
- instantiateVar(var, storeMap);
-
- mlir::Value primaryFuncResultStorage;
- for (const Fortran::lower::pft::Variable &var :
- funit.getOrderedSymbolTable()) {
- // Always instantiate aggregate storage blocks.
- if (var.isAggregateStore()) {
- instantiateVar(var, storeMap);
- continue;
- }
- const Fortran::semantics::Symbol &sym = var.getSymbol();
- if (funit.parentHasHostAssoc()) {
- // Never instantitate host associated variables, as they are already
- // instantiated from an argument tuple. Instead, just bind the symbol to
- // the reference to the host variable, which must be in the map.
- const Fortran::semantics::Symbol &ultimate = sym.GetUltimate();
- if (funit.parentHostAssoc().isAssociated(ultimate)) {
- Fortran::lower::SymbolBox hostBox =
- localSymbols.lookupSymbol(ultimate);
- assert(hostBox && "host association is not in map");
- localSymbols.addSymbol(sym, hostBox.toExtendedValue());
- continue;
- }
- }
- if (!sym.IsFuncResult() || !funit.primaryResult) {
- instantiateVar(var, storeMap);
- } else if (&sym == funit.primaryResult) {
- instantiateVar(var, storeMap);
- primaryFuncResultStorage = getSymbolAddress(sym);
- } else {
- deferredFuncResultList.push_back(var);
- }
- }
-
- // If this is a host procedure with host associations, then create the tuple
- // of pointers for passing to the internal procedures.
- if (!funit.getHostAssoc().empty())
- funit.getHostAssoc().hostProcedureBindings(*this, localSymbols);
-
- /// TODO: should use same mechanism as equivalence?
- /// One blocking point is character entry returns that need special handling
- /// since they are not locally allocated but come as argument. CHARACTER(*)
- /// is not something that fit wells with equivalence lowering.
- for (const Fortran::lower::pft::Variable &altResult :
- deferredFuncResultList) {
- if (std::optional<Fortran::lower::CalleeInterface::PassedEntity>
- passedResult = callee.getPassedResult())
- addSymbol(altResult.getSymbol(), resultArg.getAddr());
- Fortran::lower::StatementContext stmtCtx;
- Fortran::lower::mapSymbolAttributes(*this, altResult, localSymbols,
- stmtCtx, primaryFuncResultStorage);
- }
-
- // Create most function blocks in advance.
- createEmptyGlobalBlocks(funit.evaluationList);
-
- // Reinstate entry block as the current insertion point.
- builder->setInsertionPointToEnd(&func.front());
-
- if (callee.hasAlternateReturns()) {
- // Create a local temp to hold the alternate return index.
- // Give it an integer index type and the subroutine name (for dumps).
- // Attach it to the subroutine symbol in the localSymbols map.
- // Initialize it to zero, the "fallthrough" alternate return value.
- const Fortran::semantics::Symbol &symbol = funit.getSubprogramSymbol();
- mlir::Location loc = toLocation();
- mlir::Type idxTy = builder->getIndexType();
- mlir::Value altResult =
- builder->createTemporary(loc, idxTy, toStringRef(symbol.name()));
- addSymbol(symbol, altResult);
- mlir::Value zero = builder->createIntegerConstant(loc, idxTy, 0);
- builder->create<fir::StoreOp>(loc, zero, altResult);
- }
-
- if (Fortran::lower::pft::Evaluation *alternateEntryEval =
- funit.getEntryEval())
- genFIRBranch(alternateEntryEval->lexicalSuccessor->block);
- }
-
- /// Create global blocks for the current function. This eliminates the
- /// distinction between forward and backward targets when generating
- /// branches. A block is "global" if it can be the target of a GOTO or
- /// other source code branch. A block that can only be targeted by a
- /// compiler generated branch is "local". For example, a DO loop preheader
- /// block containing loop initialization code is global. A loop header
- /// block, which is the target of the loop back edge, is local. Blocks
- /// belong to a region. Any block within a nested region must be replaced
- /// with a block belonging to that region. Branches may not cross region
- /// boundaries.
- void createEmptyGlobalBlocks(
- std::list<Fortran::lower::pft::Evaluation> &evaluationList) {
- mlir::Region *region = &builder->getRegion();
- for (Fortran::lower::pft::Evaluation &eval : evaluationList) {
- if (eval.isNewBlock)
- eval.block = builder->createBlock(region);
- if (eval.isConstruct() || eval.isDirective()) {
- if (eval.lowerAsUnstructured()) {
- createEmptyGlobalBlocks(eval.getNestedEvaluations());
- } else if (eval.hasNestedEvaluations()) {
- // A structured construct that is a target starts a new block.
- Fortran::lower::pft::Evaluation &constructStmt =
- eval.getFirstNestedEvaluation();
- if (constructStmt.isNewBlock)
- constructStmt.block = builder->createBlock(region);
- }
- }
- }
- }
-
- /// Lower a procedure (nest).
- void lowerFunc(Fortran::lower::pft::FunctionLikeUnit &funit) {
- if (!funit.isMainProgram()) {
- const Fortran::semantics::Symbol &procSymbol =
- funit.getSubprogramSymbol();
- if (procSymbol.owner().IsSubmodule()) {
- TODO(toLocation(), "support submodules");
- return;
- }
- }
- setCurrentPosition(funit.getStartingSourceLoc());
- for (int entryIndex = 0, last = funit.entryPointList.size();
- entryIndex < last; ++entryIndex) {
- funit.setActiveEntry(entryIndex);
- startNewFunction(funit); // the entry point for lowering this procedure
- for (Fortran::lower::pft::Evaluation &eval : funit.evaluationList)
- genFIR(eval);
- endNewFunction(funit);
- }
- funit.setActiveEntry(0);
- for (Fortran::lower::pft::FunctionLikeUnit &f : funit.nestedFunctions)
- lowerFunc(f); // internal procedure
- }
-
- /// Lower module variable definitions to fir::globalOp and OpenMP/OpenACC
- /// declarative construct.
- void lowerModuleDeclScope(Fortran::lower::pft::ModuleLikeUnit &mod) {
- setCurrentPosition(mod.getStartingSourceLoc());
- createGlobalOutsideOfFunctionLowering([&]() {
- for (const Fortran::lower::pft::Variable &var :
- mod.getOrderedSymbolTable()) {
- // Only define the variables owned by this module.
- const Fortran::semantics::Scope *owningScope = var.getOwningScope();
- if (!owningScope || mod.getScope() == *owningScope)
- Fortran::lower::defineModuleVariable(*this, var);
- }
- for (auto &eval : mod.evaluationList)
- genFIR(eval);
- });
- }
-
- /// Lower functions contained in a module.
- void lowerMod(Fortran::lower::pft::ModuleLikeUnit &mod) {
- for (Fortran::lower::pft::FunctionLikeUnit &f : mod.nestedFunctions)
- lowerFunc(f);
- }
-
- mlir::Value hostAssocTupleValue() override final { return hostAssocTuple; }
-
- /// Record a binding for the ssa-value of the tuple for this function.
- void bindHostAssocTuple(mlir::Value val) override final {
- assert(!hostAssocTuple && val);
- hostAssocTuple = val;
- }
-
- void registerRuntimeTypeInfo(
- mlir::Location loc,
- Fortran::lower::SymbolRef typeInfoSym) override final {
- runtimeTypeInfoConverter.registerTypeInfoSymbol(*this, loc, typeInfoSym);
- }
-
-private:
- FirConverter() = delete;
- FirConverter(const FirConverter &) = delete;
- FirConverter &operator=(const FirConverter &) = delete;
-
- //===--------------------------------------------------------------------===//
- // Helper member functions
- //===--------------------------------------------------------------------===//
-
- mlir::Value createFIRExpr(mlir::Location loc,
- const Fortran::lower::SomeExpr *expr,
- Fortran::lower::StatementContext &stmtCtx) {
- return fir::getBase(genExprValue(*expr, stmtCtx, &loc));
- }
-
- /// Find the symbol in the local map or return null.
- Fortran::lower::SymbolBox
- lookupSymbol(const Fortran::semantics::Symbol &sym) {
- if (Fortran::lower::SymbolBox v = localSymbols.lookupSymbol(sym))
- return v;
- return {};
- }
-
- /// Find the symbol in the inner-most level of the local map or return null.
- Fortran::lower::SymbolBox
- shallowLookupSymbol(const Fortran::semantics::Symbol &sym) {
- if (Fortran::lower::SymbolBox v = localSymbols.shallowLookupSymbol(sym))
- return v;
- return {};
- }
-
- /// Add the symbol to the local map and return `true`. If the symbol is
- /// already in the map and \p forced is `false`, the map is not updated.
- /// Instead the value `false` is returned.
- bool addSymbol(const Fortran::semantics::SymbolRef sym, mlir::Value val,
- bool forced = false) {
- if (!forced && lookupSymbol(sym))
- return false;
- localSymbols.addSymbol(sym, val, forced);
- return true;
- }
-
- bool isNumericScalarCategory(Fortran::common::TypeCategory cat) {
- return cat == Fortran::common::TypeCategory::Integer ||
- cat == Fortran::common::TypeCategory::Real ||
- cat == Fortran::common::TypeCategory::Complex ||
- cat == Fortran::common::TypeCategory::Logical;
- }
- static bool isLogicalCategory(Fortran::common::TypeCategory cat) {
- return cat == Fortran::common::TypeCategory::Logical;
- }
- bool isCharacterCategory(Fortran::common::TypeCategory cat) {
- return cat == Fortran::common::TypeCategory::Character;
- }
- bool isDerivedCategory(Fortran::common::TypeCategory cat) {
- return cat == Fortran::common::TypeCategory::Derived;
- }
-
- /// Insert a new block before \p block. Leave the insertion point unchanged.
- mlir::Block *insertBlock(mlir::Block *block) {
- mlir::OpBuilder::InsertPoint insertPt = builder->saveInsertionPoint();
- mlir::Block *newBlock = builder->createBlock(block);
- builder->restoreInsertionPoint(insertPt);
- return newBlock;
- }
-
- mlir::Block *blockOfLabel(Fortran::lower::pft::Evaluation &eval,
- Fortran::parser::Label label) {
- const Fortran::lower::pft::LabelEvalMap &labelEvaluationMap =
- eval.getOwningProcedure()->labelEvaluationMap;
- const auto iter = labelEvaluationMap.find(label);
- assert(iter != labelEvaluationMap.end() && "label missing from map");
- mlir::Block *block = iter->second->block;
- assert(block && "missing labeled evaluation block");
- return block;
- }
-
- void genFIRBranch(mlir::Block *targetBlock) {
- assert(targetBlock && "missing unconditional target block");
- builder->create<cf::BranchOp>(toLocation(), targetBlock);
- }
+ void genFIRBranch(mlir::Block *targetBlock) {
+ assert(targetBlock && "missing unconditional target block");
+ builder->create<mlir::cf::BranchOp>(toLocation(), targetBlock);
+ }
void genFIRConditionalBranch(mlir::Value cond, mlir::Block *trueTarget,
mlir::Block *falseTarget) {
mlir::Value retval = builder->create<fir::LoadOp>(
toLocation(), getAltReturnResult(symbol));
builder->create<mlir::func::ReturnOp>(toLocation(), retval);
- } else {
- genExitRoutine();
- }
- }
-
- //
- // Statements that have control-flow semantics
- //
-
- /// Generate an If[Then]Stmt condition or its negation.
- template <typename A>
- mlir::Value genIfCondition(const A *stmt, bool negate = false) {
- mlir::Location loc = toLocation();
- Fortran::lower::StatementContext stmtCtx;
- mlir::Value condExpr = createFIRExpr(
- loc,
- Fortran::semantics::GetExpr(
- std::get<Fortran::parser::ScalarLogicalExpr>(stmt->t)),
- stmtCtx);
- stmtCtx.finalize();
- mlir::Value cond =
- builder->createConvert(loc, builder->getI1Type(), condExpr);
- if (negate)
- cond = builder->create<mlir::arith::XOrIOp>(
- loc, cond, builder->createIntegerConstant(loc, cond.getType(), 1));
- return cond;
- }
-
- static bool
- isArraySectionWithoutVectorSubscript(const Fortran::lower::SomeExpr &expr) {
- return expr.Rank() > 0 && Fortran::evaluate::IsVariable(expr) &&
- !Fortran::evaluate::UnwrapWholeSymbolDataRef(expr) &&
- !Fortran::evaluate::HasVectorSubscript(expr);
- }
-
- [[maybe_unused]] static bool
- isFuncResultDesignator(const Fortran::lower::SomeExpr &expr) {
- const Fortran::semantics::Symbol *sym =
- Fortran::evaluate::GetFirstSymbol(expr);
- return sym && sym->IsFuncResult();
- }
-
- static bool isWholeAllocatable(const Fortran::lower::SomeExpr &expr) {
- const Fortran::semantics::Symbol *sym =
- Fortran::evaluate::UnwrapWholeSymbolOrComponentDataRef(expr);
- return sym && Fortran::semantics::IsAllocatable(*sym);
- }
-
- /// Shared for both assignments and pointer assignments.
- void genAssignment(const Fortran::evaluate::Assignment &assign) {
- Fortran::lower::StatementContext stmtCtx;
- mlir::Location loc = toLocation();
- if (explicitIterationSpace()) {
- Fortran::lower::createArrayLoads(*this, explicitIterSpace, localSymbols);
- explicitIterSpace.genLoopNest();
- }
- std::visit(
- Fortran::common::visitors{
- // [1] Plain old assignment.
- [&](const Fortran::evaluate::Assignment::Intrinsic &) {
- const Fortran::semantics::Symbol *sym =
- Fortran::evaluate::GetLastSymbol(assign.lhs);
-
- if (!sym)
- TODO(loc, "assignment to pointer result of function reference");
-
- std::optional<Fortran::evaluate::DynamicType> lhsType =
- assign.lhs.GetType();
- assert(lhsType && "lhs cannot be typeless");
- // Assignment to polymorphic allocatables may require changing the
- // variable dynamic type (See Fortran 2018 10.2.1.3 p3).
- if (lhsType->IsPolymorphic() && isWholeAllocatable(assign.lhs))
- TODO(loc, "assignment to polymorphic allocatable");
-
- // Note: No ad-hoc handling for pointers is required here. The
- // target will be assigned as per 2018 10.2.1.3 p2. genExprAddr
- // on a pointer returns the target address and not the address of
- // the pointer variable.
-
- if (assign.lhs.Rank() > 0 || explicitIterationSpace()) {
- // Array assignment
- // See Fortran 2018 10.2.1.3 p5, p6, and p7
- genArrayAssignment(assign, stmtCtx);
- return;
- }
-
- // Scalar assignment
- const bool isNumericScalar =
- isNumericScalarCategory(lhsType->category());
- fir::ExtendedValue rhs = isNumericScalar
- ? genExprValue(assign.rhs, stmtCtx)
- : genExprAddr(assign.rhs, stmtCtx);
- bool lhsIsWholeAllocatable = isWholeAllocatable(assign.lhs);
- llvm::Optional<fir::factory::MutableBoxReallocation> lhsRealloc;
- llvm::Optional<fir::MutableBoxValue> lhsMutableBox;
- auto lhs = [&]() -> fir::ExtendedValue {
- if (lhsIsWholeAllocatable) {
- lhsMutableBox = genExprMutableBox(loc, assign.lhs);
- llvm::SmallVector<mlir::Value> lengthParams;
- if (const fir::CharBoxValue *charBox = rhs.getCharBox())
- lengthParams.push_back(charBox->getLen());
- else if (fir::isDerivedWithLengthParameters(rhs))
- TODO(loc, "assignment to derived type allocatable with "
- "length parameters");
- lhsRealloc = fir::factory::genReallocIfNeeded(
- *builder, loc, *lhsMutableBox,
- /*shape=*/llvm::None, lengthParams);
- return lhsRealloc->newValue;
- }
- return genExprAddr(assign.lhs, stmtCtx);
- }();
-
- if (isNumericScalar) {
- // Fortran 2018 10.2.1.3 p8 and p9
- // Conversions should have been inserted by semantic analysis,
- // but they can be incorrect between the rhs and lhs. Correct
- // that here.
- mlir::Value addr = fir::getBase(lhs);
- mlir::Value val = fir::getBase(rhs);
- // A function with multiple entry points returning different
- // types tags all result variables with one of the largest
- // types to allow them to share the same storage. Assignment
- // to a result variable of one of the other types requires
- // conversion to the actual type.
- mlir::Type toTy = genType(assign.lhs);
- mlir::Value cast =
- builder->convertWithSemantics(loc, toTy, val);
- if (fir::dyn_cast_ptrEleTy(addr.getType()) != toTy) {
- assert(isFuncResultDesignator(assign.lhs) && "type mismatch");
- addr = builder->createConvert(
- toLocation(), builder->getRefType(toTy), addr);
- }
- builder->create<fir::StoreOp>(loc, cast, addr);
- } else if (isCharacterCategory(lhsType->category())) {
- // Fortran 2018 10.2.1.3 p10 and p11
- fir::factory::CharacterExprHelper{*builder, loc}.createAssign(
- lhs, rhs);
- } else if (isDerivedCategory(lhsType->category())) {
- // Fortran 2018 10.2.1.3 p13 and p14
- // Recursively gen an assignment on each element pair.
- fir::factory::genRecordAssignment(*builder, loc, lhs, rhs);
- } else {
- llvm_unreachable("unknown category");
- }
- if (lhsIsWholeAllocatable)
- fir::factory::finalizeRealloc(
- *builder, loc, lhsMutableBox.getValue(),
- /*lbounds=*/llvm::None, /*takeLboundsIfRealloc=*/false,
- lhsRealloc.getValue());
- },
-
- // [2] User defined assignment. If the context is a scalar
- // expression then call the procedure.
- [&](const Fortran::evaluate::ProcedureRef &procRef) {
- Fortran::lower::StatementContext &ctx =
- explicitIterationSpace() ? explicitIterSpace.stmtContext()
- : stmtCtx;
- Fortran::lower::createSubroutineCall(
- *this, procRef, explicitIterSpace, implicitIterSpace,
- localSymbols, ctx, /*isUserDefAssignment=*/true);
- },
-
- // [3] Pointer assignment with possibly empty bounds-spec. R1035: a
- // bounds-spec is a lower bound value.
- [&](const Fortran::evaluate::Assignment::BoundsSpec &lbExprs) {
- if (IsProcedure(assign.rhs))
- TODO(loc, "procedure pointer assignment");
- std::optional<Fortran::evaluate::DynamicType> lhsType =
- assign.lhs.GetType();
- std::optional<Fortran::evaluate::DynamicType> rhsType =
- assign.rhs.GetType();
- // Polymorphic lhs/rhs may need more care. See F2018 10.2.2.3.
- if ((lhsType && lhsType->IsPolymorphic()) ||
- (rhsType && rhsType->IsPolymorphic()))
- TODO(loc, "pointer assignment involving polymorphic entity");
-
- // FIXME: in the explicit space context, we want to use
- // ScalarArrayExprLowering here.
- fir::MutableBoxValue lhs = genExprMutableBox(loc, assign.lhs);
- llvm::SmallVector<mlir::Value> lbounds;
- for (const Fortran::evaluate::ExtentExpr &lbExpr : lbExprs)
- lbounds.push_back(
- fir::getBase(genExprValue(toEvExpr(lbExpr), stmtCtx)));
- Fortran::lower::associateMutableBox(*this, loc, lhs, assign.rhs,
- lbounds, stmtCtx);
- if (explicitIterationSpace()) {
- mlir::ValueRange inners = explicitIterSpace.getInnerArgs();
- if (!inners.empty()) {
- // TODO: should force a copy-in/copy-out here.
- // e.g., obj%ptr(i+1) => obj%ptr(i)
- builder->create<fir::ResultOp>(loc, inners);
- }
- }
- },
+ } else {
+ genExitRoutine();
+ }
+ }
- // [4] Pointer assignment with bounds-remapping. R1036: a
- // bounds-remapping is a pair, lower bound and upper bound.
- [&](const Fortran::evaluate::Assignment::BoundsRemapping
- &boundExprs) {
- std::optional<Fortran::evaluate::DynamicType> lhsType =
- assign.lhs.GetType();
- std::optional<Fortran::evaluate::DynamicType> rhsType =
- assign.rhs.GetType();
- // Polymorphic lhs/rhs may need more care. See F2018 10.2.2.3.
- if ((lhsType && lhsType->IsPolymorphic()) ||
- (rhsType && rhsType->IsPolymorphic()))
- TODO(loc, "pointer assignment involving polymorphic entity");
+ //
+ // Statements that have control-flow semantics
+ //
- // FIXME: in the explicit space context, we want to use
- // ScalarArrayExprLowering here.
- fir::MutableBoxValue lhs = genExprMutableBox(loc, assign.lhs);
- if (Fortran::evaluate::UnwrapExpr<Fortran::evaluate::NullPointer>(
- assign.rhs)) {
- fir::factory::disassociateMutableBox(*builder, loc, lhs);
- return;
- }
- llvm::SmallVector<mlir::Value> lbounds;
- llvm::SmallVector<mlir::Value> ubounds;
- for (const std::pair<Fortran::evaluate::ExtentExpr,
- Fortran::evaluate::ExtentExpr> &pair :
- boundExprs) {
- const Fortran::evaluate::ExtentExpr &lbExpr = pair.first;
- const Fortran::evaluate::ExtentExpr &ubExpr = pair.second;
- lbounds.push_back(
- fir::getBase(genExprValue(toEvExpr(lbExpr), stmtCtx)));
- ubounds.push_back(
- fir::getBase(genExprValue(toEvExpr(ubExpr), stmtCtx)));
- }
- // Do not generate a temp in case rhs is an array section.
- fir::ExtendedValue rhs =
- isArraySectionWithoutVectorSubscript(assign.rhs)
- ? Fortran::lower::createSomeArrayBox(
- *this, assign.rhs, localSymbols, stmtCtx)
- : genExprAddr(assign.rhs, stmtCtx);
- fir::factory::associateMutableBoxWithRemap(*builder, loc, lhs,
- rhs, lbounds, ubounds);
- if (explicitIterationSpace()) {
- mlir::ValueRange inners = explicitIterSpace.getInnerArgs();
- if (!inners.empty()) {
- // TODO: should force a copy-in/copy-out here.
- // e.g., obj%ptr(i+1) => obj%ptr(i)
- builder->create<fir::ResultOp>(loc, inners);
- }
- }
- },
- },
- assign.u);
- if (explicitIterationSpace())
- Fortran::lower::createArrayMergeStores(*this, explicitIterSpace);
+ /// Generate an If[Then]Stmt condition or its negation.
+ template <typename A>
+ mlir::Value genIfCondition(const A *stmt, bool negate = false) {
+ mlir::Location loc = toLocation();
+ Fortran::lower::StatementContext stmtCtx;
+ mlir::Value condExpr = createFIRExpr(
+ loc,
+ Fortran::semantics::GetExpr(
+ std::get<Fortran::parser::ScalarLogicalExpr>(stmt->t)),
+ stmtCtx);
+ stmtCtx.finalize();
+ mlir::Value cond =
+ builder->createConvert(loc, builder->getI1Type(), condExpr);
+ if (negate)
+ cond = builder->create<mlir::arith::XOrIOp>(
+ loc, cond, builder->createIntegerConstant(loc, cond.getType(), 1));
+ return cond;
+ }
+
+ mlir::FuncOp getFunc(llvm::StringRef name, mlir::FunctionType ty) {
+ if (mlir::FuncOp func = builder->getNamedFunction(name)) {
+ assert(func.getFunctionType() == ty);
+ return func;
+ }
+ return builder->createFunction(toLocation(), name, ty);
}
/// Lowering of CALL statement
if (exprType.isSignlessInteger()) {
// Arithmetic expression has Integer type. Generate a SelectCaseOp
// with ranges {(-inf:-1], 0=default, [1:inf)}.
- MLIRContext *context = builder->getContext();
+ mlir::MLIRContext *context = builder->getContext();
llvm::SmallVector<mlir::Attribute> attrList;
llvm::SmallVector<mlir::Value> valueList;
llvm::SmallVector<mlir::Block *> blockList;
builder->create<fir::SelectOp>(loc, selectExpr, indexList, blockList);
}
+ /// Generate FIR for a DO construct. There are six variants:
+ /// - unstructured infinite and while loops
+ /// - structured and unstructured increment loops
+ /// - structured and unstructured concurrent loops
void genFIR(const Fortran::parser::DoConstruct &doConstruct) {
TODO(toLocation(), "DoConstruct lowering");
}
+ /// Generate structured or unstructured FIR for an IF construct.
+ /// The initial statement may be either an IfStmt or an IfThenStmt.
void genFIR(const Fortran::parser::IfConstruct &) {
mlir::Location loc = toLocation();
Fortran::lower::pft::Evaluation &eval = getEval();
builder->restoreInsertionPoint(insertPt);
}
- void genFIR(const Fortran::parser::OpenMPDeclarativeConstruct &) {
+ void genFIR(const Fortran::parser::OpenMPDeclarativeConstruct &ompDecl) {
TODO(toLocation(), "OpenMPDeclarativeConstruct lowering");
}
/// The type may be CHARACTER, INTEGER, or LOGICAL.
void genFIR(const Fortran::parser::SelectCaseStmt &stmt) {
Fortran::lower::pft::Evaluation &eval = getEval();
- MLIRContext *context = builder->getContext();
+ mlir::MLIRContext *context = builder->getContext();
mlir::Location loc = toLocation();
Fortran::lower::StatementContext stmtCtx;
const Fortran::lower::SomeExpr *expr = Fortran::semantics::GetExpr(
}
void genFIR(const Fortran::parser::BlockConstruct &blockConstruct) {
+ setCurrentPositionAt(blockConstruct);
TODO(toLocation(), "BlockConstruct lowering");
}
-
void genFIR(const Fortran::parser::BlockStmt &) {
TODO(toLocation(), "BlockStmt lowering");
}
-
void genFIR(const Fortran::parser::EndBlockStmt &) {
TODO(toLocation(), "EndBlockStmt lowering");
}
void genFIR(const Fortran::parser::ChangeTeamConstruct &construct) {
TODO(toLocation(), "ChangeTeamConstruct lowering");
}
-
void genFIR(const Fortran::parser::ChangeTeamStmt &stmt) {
TODO(toLocation(), "ChangeTeamStmt lowering");
}
-
void genFIR(const Fortran::parser::EndChangeTeamStmt &stmt) {
TODO(toLocation(), "EndChangeTeamStmt lowering");
}
void genFIR(const Fortran::parser::CriticalConstruct &criticalConstruct) {
+ setCurrentPositionAt(criticalConstruct);
TODO(toLocation(), "CriticalConstruct lowering");
}
-
void genFIR(const Fortran::parser::CriticalStmt &) {
TODO(toLocation(), "CriticalStmt lowering");
}
-
void genFIR(const Fortran::parser::EndCriticalStmt &) {
TODO(toLocation(), "EndCriticalStmt lowering");
}
void genFIR(const Fortran::parser::SelectRankConstruct &selectRankConstruct) {
+ setCurrentPositionAt(selectRankConstruct);
TODO(toLocation(), "SelectRankConstruct lowering");
}
-
void genFIR(const Fortran::parser::SelectRankStmt &) {
TODO(toLocation(), "SelectRankStmt lowering");
}
-
void genFIR(const Fortran::parser::SelectRankCaseStmt &) {
TODO(toLocation(), "SelectRankCaseStmt lowering");
}
void genFIR(const Fortran::parser::SelectTypeConstruct &selectTypeConstruct) {
+ setCurrentPositionAt(selectTypeConstruct);
TODO(toLocation(), "SelectTypeConstruct lowering");
}
-
void genFIR(const Fortran::parser::SelectTypeStmt &) {
TODO(toLocation(), "SelectTypeStmt lowering");
}
-
void genFIR(const Fortran::parser::TypeGuardStmt &) {
TODO(toLocation(), "TypeGuardStmt lowering");
}
mlir::Value iostat = genBackspaceStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.v, iostat);
}
-
void genFIR(const Fortran::parser::CloseStmt &stmt) {
mlir::Value iostat = genCloseStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.v, iostat);
}
-
void genFIR(const Fortran::parser::EndfileStmt &stmt) {
mlir::Value iostat = genEndfileStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.v, iostat);
}
-
void genFIR(const Fortran::parser::FlushStmt &stmt) {
mlir::Value iostat = genFlushStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.v, iostat);
}
-
void genFIR(const Fortran::parser::InquireStmt &stmt) {
mlir::Value iostat = genInquireStatement(*this, stmt);
if (const auto *specs =
std::get_if<std::list<Fortran::parser::InquireSpec>>(&stmt.u))
genIoConditionBranches(getEval(), *specs, iostat);
}
-
void genFIR(const Fortran::parser::OpenStmt &stmt) {
mlir::Value iostat = genOpenStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.v, iostat);
}
-
void genFIR(const Fortran::parser::PrintStmt &stmt) {
genPrintStatement(*this, stmt);
}
-
void genFIR(const Fortran::parser::ReadStmt &stmt) {
mlir::Value iostat = genReadStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.controls, iostat);
}
-
void genFIR(const Fortran::parser::RewindStmt &stmt) {
mlir::Value iostat = genRewindStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.v, iostat);
}
-
void genFIR(const Fortran::parser::WaitStmt &stmt) {
mlir::Value iostat = genWaitStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.v, iostat);
}
-
void genFIR(const Fortran::parser::WriteStmt &stmt) {
mlir::Value iostat = genWriteStatement(*this, stmt);
genIoConditionBranches(getEval(), stmt.controls, iostat);
TODO(toLocation(), "LockStmt lowering");
}
- /// Return true if the current context is a conditionalized and implied
- /// iteration space.
- bool implicitIterationSpace() { return !implicitIterSpace.empty(); }
+ fir::ExtendedValue
+ genInitializerExprValue(const Fortran::lower::SomeExpr &expr,
+ Fortran::lower::StatementContext &stmtCtx) {
+ return Fortran::lower::createSomeInitializerExpression(
+ toLocation(), *this, expr, localSymbols, stmtCtx);
+ }
+
+ /// Return true if the current context is a conditionalized and implied
+ /// iteration space.
+ bool implicitIterationSpace() { return !implicitIterSpace.empty(); }
+
+ /// Return true if context is currently an explicit iteration space. A scalar
+ /// assignment expression may be contextually within a user-defined iteration
+ /// space, transforming it into an array expression.
+ bool explicitIterationSpace() { return explicitIterSpace.isActive(); }
+
+ /// Generate an array assignment.
+ /// This is an assignment expression with rank > 0. The assignment may or may
+ /// not be in a WHERE and/or FORALL context.
+ void genArrayAssignment(const Fortran::evaluate::Assignment &assign,
+ Fortran::lower::StatementContext &stmtCtx) {
+ if (isWholeAllocatable(assign.lhs)) {
+ // Assignment to allocatables may require the lhs to be
+ // deallocated/reallocated. See Fortran 2018 10.2.1.3 p3
+ Fortran::lower::createAllocatableArrayAssignment(
+ *this, assign.lhs, assign.rhs, explicitIterSpace, implicitIterSpace,
+ localSymbols, stmtCtx);
+ return;
+ }
+
+ if (!implicitIterationSpace() && !explicitIterationSpace()) {
+ // No masks and the iteration space is implied by the array, so create a
+ // simple array assignment.
+ Fortran::lower::createSomeArrayAssignment(*this, assign.lhs, assign.rhs,
+ localSymbols, stmtCtx);
+ return;
+ }
+
+ // If there is an explicit iteration space, generate an array assignment
+ // with a user-specified iteration space and possibly with masks. These
+ // assignments may *appear* to be scalar expressions, but the scalar
+ // expression is evaluated at all points in the user-defined space much like
+ // an ordinary array assignment. More specifically, the semantics inside the
+ // FORALL much more closely resembles that of WHERE than a scalar
+ // assignment.
+ // Otherwise, generate a masked array assignment. The iteration space is
+ // implied by the lhs array expression.
+ Fortran::lower::createAnyMaskedArrayAssignment(
+ *this, assign.lhs, assign.rhs, explicitIterSpace, implicitIterSpace,
+ localSymbols,
+ explicitIterationSpace() ? explicitIterSpace.stmtContext()
+ : implicitIterSpace.stmtContext());
+ }
+
+ static bool
+ isArraySectionWithoutVectorSubscript(const Fortran::lower::SomeExpr &expr) {
+ return expr.Rank() > 0 && Fortran::evaluate::IsVariable(expr) &&
+ !Fortran::evaluate::UnwrapWholeSymbolDataRef(expr) &&
+ !Fortran::evaluate::HasVectorSubscript(expr);
+ }
+
+#if !defined(NDEBUG)
+ static bool isFuncResultDesignator(const Fortran::lower::SomeExpr &expr) {
+ const Fortran::semantics::Symbol *sym =
+ Fortran::evaluate::GetFirstSymbol(expr);
+ return sym && sym->IsFuncResult();
+ }
+#endif
+
+ static bool isWholeAllocatable(const Fortran::lower::SomeExpr &expr) {
+ const Fortran::semantics::Symbol *sym =
+ Fortran::evaluate::UnwrapWholeSymbolOrComponentDataRef(expr);
+ return sym && Fortran::semantics::IsAllocatable(*sym);
+ }
+
+ /// Shared for both assignments and pointer assignments.
+ void genAssignment(const Fortran::evaluate::Assignment &assign) {
+ Fortran::lower::StatementContext stmtCtx;
+ mlir::Location loc = toLocation();
+ if (explicitIterationSpace()) {
+ Fortran::lower::createArrayLoads(*this, explicitIterSpace, localSymbols);
+ explicitIterSpace.genLoopNest();
+ }
+ std::visit(
+ Fortran::common::visitors{
+ // [1] Plain old assignment.
+ [&](const Fortran::evaluate::Assignment::Intrinsic &) {
+ const Fortran::semantics::Symbol *sym =
+ Fortran::evaluate::GetLastSymbol(assign.lhs);
+
+ if (!sym)
+ TODO(loc, "assignment to pointer result of function reference");
+
+ std::optional<Fortran::evaluate::DynamicType> lhsType =
+ assign.lhs.GetType();
+ assert(lhsType && "lhs cannot be typeless");
+ // Assignment to polymorphic allocatables may require changing the
+ // variable dynamic type (See Fortran 2018 10.2.1.3 p3).
+ if (lhsType->IsPolymorphic() && isWholeAllocatable(assign.lhs))
+ TODO(loc, "assignment to polymorphic allocatable");
+
+ // Note: No ad-hoc handling for pointers is required here. The
+ // target will be assigned as per 2018 10.2.1.3 p2. genExprAddr
+ // on a pointer returns the target address and not the address of
+ // the pointer variable.
+
+ if (assign.lhs.Rank() > 0 || explicitIterationSpace()) {
+ // Array assignment
+ // See Fortran 2018 10.2.1.3 p5, p6, and p7
+ genArrayAssignment(assign, stmtCtx);
+ return;
+ }
+
+ // Scalar assignment
+ const bool isNumericScalar =
+ isNumericScalarCategory(lhsType->category());
+ fir::ExtendedValue rhs = isNumericScalar
+ ? genExprValue(assign.rhs, stmtCtx)
+ : genExprAddr(assign.rhs, stmtCtx);
+ bool lhsIsWholeAllocatable = isWholeAllocatable(assign.lhs);
+ llvm::Optional<fir::factory::MutableBoxReallocation> lhsRealloc;
+ llvm::Optional<fir::MutableBoxValue> lhsMutableBox;
+ auto lhs = [&]() -> fir::ExtendedValue {
+ if (lhsIsWholeAllocatable) {
+ lhsMutableBox = genExprMutableBox(loc, assign.lhs);
+ llvm::SmallVector<mlir::Value> lengthParams;
+ if (const fir::CharBoxValue *charBox = rhs.getCharBox())
+ lengthParams.push_back(charBox->getLen());
+ else if (fir::isDerivedWithLengthParameters(rhs))
+ TODO(loc, "assignment to derived type allocatable with "
+ "length parameters");
+ lhsRealloc = fir::factory::genReallocIfNeeded(
+ *builder, loc, *lhsMutableBox,
+ /*shape=*/llvm::None, lengthParams);
+ return lhsRealloc->newValue;
+ }
+ return genExprAddr(assign.lhs, stmtCtx);
+ }();
+
+ if (isNumericScalar) {
+ // Fortran 2018 10.2.1.3 p8 and p9
+ // Conversions should have been inserted by semantic analysis,
+ // but they can be incorrect between the rhs and lhs. Correct
+ // that here.
+ mlir::Value addr = fir::getBase(lhs);
+ mlir::Value val = fir::getBase(rhs);
+ // A function with multiple entry points returning different
+ // types tags all result variables with one of the largest
+ // types to allow them to share the same storage. Assignment
+ // to a result variable of one of the other types requires
+ // conversion to the actual type.
+ mlir::Type toTy = genType(assign.lhs);
+ mlir::Value cast =
+ builder->convertWithSemantics(loc, toTy, val);
+ if (fir::dyn_cast_ptrEleTy(addr.getType()) != toTy) {
+ assert(isFuncResultDesignator(assign.lhs) && "type mismatch");
+ addr = builder->createConvert(
+ toLocation(), builder->getRefType(toTy), addr);
+ }
+ builder->create<fir::StoreOp>(loc, cast, addr);
+ } else if (isCharacterCategory(lhsType->category())) {
+ // Fortran 2018 10.2.1.3 p10 and p11
+ fir::factory::CharacterExprHelper{*builder, loc}.createAssign(
+ lhs, rhs);
+ } else if (isDerivedCategory(lhsType->category())) {
+ // Fortran 2018 10.2.1.3 p13 and p14
+ // Recursively gen an assignment on each element pair.
+ fir::factory::genRecordAssignment(*builder, loc, lhs, rhs);
+ } else {
+ llvm_unreachable("unknown category");
+ }
+ if (lhsIsWholeAllocatable)
+ fir::factory::finalizeRealloc(
+ *builder, loc, lhsMutableBox.getValue(),
+ /*lbounds=*/llvm::None, /*takeLboundsIfRealloc=*/false,
+ lhsRealloc.getValue());
+ },
+
+ // [2] User defined assignment. If the context is a scalar
+ // expression then call the procedure.
+ [&](const Fortran::evaluate::ProcedureRef &procRef) {
+ Fortran::lower::StatementContext &ctx =
+ explicitIterationSpace() ? explicitIterSpace.stmtContext()
+ : stmtCtx;
+ Fortran::lower::createSubroutineCall(
+ *this, procRef, explicitIterSpace, implicitIterSpace,
+ localSymbols, ctx, /*isUserDefAssignment=*/true);
+ },
- /// Return true if context is currently an explicit iteration space. A scalar
- /// assignment expression may be contextually within a user-defined iteration
- /// space, transforming it into an array expression.
- bool explicitIterationSpace() { return explicitIterSpace.isActive(); }
+ // [3] Pointer assignment with possibly empty bounds-spec. R1035: a
+ // bounds-spec is a lower bound value.
+ [&](const Fortran::evaluate::Assignment::BoundsSpec &lbExprs) {
+ if (IsProcedure(assign.rhs))
+ TODO(loc, "procedure pointer assignment");
+ std::optional<Fortran::evaluate::DynamicType> lhsType =
+ assign.lhs.GetType();
+ std::optional<Fortran::evaluate::DynamicType> rhsType =
+ assign.rhs.GetType();
+ // Polymorphic lhs/rhs may need more care. See F2018 10.2.2.3.
+ if ((lhsType && lhsType->IsPolymorphic()) ||
+ (rhsType && rhsType->IsPolymorphic()))
+ TODO(loc, "pointer assignment involving polymorphic entity");
- /// Generate an array assignment.
- /// This is an assignment expression with rank > 0. The assignment may or may
- /// not be in a WHERE and/or FORALL context.
- void genArrayAssignment(const Fortran::evaluate::Assignment &assign,
- Fortran::lower::StatementContext &stmtCtx) {
- if (isWholeAllocatable(assign.lhs)) {
- // Assignment to allocatables may require the lhs to be
- // deallocated/reallocated. See Fortran 2018 10.2.1.3 p3
- Fortran::lower::createAllocatableArrayAssignment(
- *this, assign.lhs, assign.rhs, explicitIterSpace, implicitIterSpace,
- localSymbols, stmtCtx);
- return;
- }
+ // FIXME: in the explicit space context, we want to use
+ // ScalarArrayExprLowering here.
+ fir::MutableBoxValue lhs = genExprMutableBox(loc, assign.lhs);
+ llvm::SmallVector<mlir::Value> lbounds;
+ for (const Fortran::evaluate::ExtentExpr &lbExpr : lbExprs)
+ lbounds.push_back(
+ fir::getBase(genExprValue(toEvExpr(lbExpr), stmtCtx)));
+ Fortran::lower::associateMutableBox(*this, loc, lhs, assign.rhs,
+ lbounds, stmtCtx);
+ if (explicitIterationSpace()) {
+ mlir::ValueRange inners = explicitIterSpace.getInnerArgs();
+ if (!inners.empty()) {
+ // TODO: should force a copy-in/copy-out here.
+ // e.g., obj%ptr(i+1) => obj%ptr(i)
+ builder->create<fir::ResultOp>(loc, inners);
+ }
+ }
+ },
- if (!implicitIterationSpace() && !explicitIterationSpace()) {
- // No masks and the iteration space is implied by the array, so create a
- // simple array assignment.
- Fortran::lower::createSomeArrayAssignment(*this, assign.lhs, assign.rhs,
- localSymbols, stmtCtx);
- return;
- }
+ // [4] Pointer assignment with bounds-remapping. R1036: a
+ // bounds-remapping is a pair, lower bound and upper bound.
+ [&](const Fortran::evaluate::Assignment::BoundsRemapping
+ &boundExprs) {
+ std::optional<Fortran::evaluate::DynamicType> lhsType =
+ assign.lhs.GetType();
+ std::optional<Fortran::evaluate::DynamicType> rhsType =
+ assign.rhs.GetType();
+ // Polymorphic lhs/rhs may need more care. See F2018 10.2.2.3.
+ if ((lhsType && lhsType->IsPolymorphic()) ||
+ (rhsType && rhsType->IsPolymorphic()))
+ TODO(loc, "pointer assignment involving polymorphic entity");
- // If there is an explicit iteration space, generate an array assignment
- // with a user-specified iteration space and possibly with masks. These
- // assignments may *appear* to be scalar expressions, but the scalar
- // expression is evaluated at all points in the user-defined space much like
- // an ordinary array assignment. More specifically, the semantics inside the
- // FORALL much more closely resembles that of WHERE than a scalar
- // assignment.
- // Otherwise, generate a masked array assignment. The iteration space is
- // implied by the lhs array expression.
- Fortran::lower::createAnyMaskedArrayAssignment(
- *this, assign.lhs, assign.rhs, explicitIterSpace, implicitIterSpace,
- localSymbols,
- explicitIterationSpace() ? explicitIterSpace.stmtContext()
- : implicitIterSpace.stmtContext());
+ // FIXME: in the explicit space context, we want to use
+ // ScalarArrayExprLowering here.
+ fir::MutableBoxValue lhs = genExprMutableBox(loc, assign.lhs);
+ if (Fortran::evaluate::UnwrapExpr<Fortran::evaluate::NullPointer>(
+ assign.rhs)) {
+ fir::factory::disassociateMutableBox(*builder, loc, lhs);
+ return;
+ }
+ llvm::SmallVector<mlir::Value> lbounds;
+ llvm::SmallVector<mlir::Value> ubounds;
+ for (const std::pair<Fortran::evaluate::ExtentExpr,
+ Fortran::evaluate::ExtentExpr> &pair :
+ boundExprs) {
+ const Fortran::evaluate::ExtentExpr &lbExpr = pair.first;
+ const Fortran::evaluate::ExtentExpr &ubExpr = pair.second;
+ lbounds.push_back(
+ fir::getBase(genExprValue(toEvExpr(lbExpr), stmtCtx)));
+ ubounds.push_back(
+ fir::getBase(genExprValue(toEvExpr(ubExpr), stmtCtx)));
+ }
+ // Do not generate a temp in case rhs is an array section.
+ fir::ExtendedValue rhs =
+ isArraySectionWithoutVectorSubscript(assign.rhs)
+ ? Fortran::lower::createSomeArrayBox(
+ *this, assign.rhs, localSymbols, stmtCtx)
+ : genExprAddr(assign.rhs, stmtCtx);
+ fir::factory::associateMutableBoxWithRemap(*builder, loc, lhs,
+ rhs, lbounds, ubounds);
+ if (explicitIterationSpace()) {
+ mlir::ValueRange inners = explicitIterSpace.getInnerArgs();
+ if (!inners.empty()) {
+ // TODO: should force a copy-in/copy-out here.
+ // e.g., obj%ptr(i+1) => obj%ptr(i)
+ builder->create<fir::ResultOp>(loc, inners);
+ }
+ }
+ },
+ },
+ assign.u);
+ if (explicitIterationSpace())
+ Fortran::lower::createArrayMergeStores(*this, explicitIterSpace);
}
void genFIR(const Fortran::parser::WhereConstruct &c) {
implicitIterSpace.append(Fortran::semantics::GetExpr(
std::get<Fortran::parser::LogicalExpr>(stmt.t)));
}
- void genFIR(const Fortran::parser::WhereConstruct::Elsewhere &ew) {
- genNestedStatement(
- std::get<Fortran::parser::Statement<Fortran::parser::ElsewhereStmt>>(
- ew.t));
- for (const auto &body :
- std::get<std::list<Fortran::parser::WhereBodyConstruct>>(ew.t))
- genFIR(body);
+ void genFIR(const Fortran::parser::WhereConstruct::Elsewhere &ew) {
+ genNestedStatement(
+ std::get<Fortran::parser::Statement<Fortran::parser::ElsewhereStmt>>(
+ ew.t));
+ for (const auto &body :
+ std::get<std::list<Fortran::parser::WhereBodyConstruct>>(ew.t))
+ genFIR(body);
+ }
+ void genFIR(const Fortran::parser::ElsewhereStmt &stmt) {
+ implicitIterSpace.append(nullptr);
+ }
+ void genFIR(const Fortran::parser::EndWhereStmt &) {
+ implicitIterSpace.shrinkStack();
+ }
+
+ void genFIR(const Fortran::parser::WhereStmt &stmt) {
+ Fortran::lower::StatementContext stmtCtx;
+ const auto &assign = std::get<Fortran::parser::AssignmentStmt>(stmt.t);
+ implicitIterSpace.growStack();
+ implicitIterSpace.append(Fortran::semantics::GetExpr(
+ std::get<Fortran::parser::LogicalExpr>(stmt.t)));
+ genAssignment(*assign.typedAssignment->v);
+ implicitIterSpace.shrinkStack();
+ }
+
+ void genFIR(const Fortran::parser::PointerAssignmentStmt &stmt) {
+ genAssignment(*stmt.typedAssignment->v);
+ }
+
+ void genFIR(const Fortran::parser::AssignmentStmt &stmt) {
+ genAssignment(*stmt.typedAssignment->v);
+ }
+
+ void genFIR(const Fortran::parser::SyncAllStmt &stmt) {
+ TODO(toLocation(), "SyncAllStmt lowering");
+ }
+
+ void genFIR(const Fortran::parser::SyncImagesStmt &stmt) {
+ TODO(toLocation(), "SyncImagesStmt lowering");
+ }
+
+ void genFIR(const Fortran::parser::SyncMemoryStmt &stmt) {
+ TODO(toLocation(), "SyncMemoryStmt lowering");
+ }
+
+ void genFIR(const Fortran::parser::SyncTeamStmt &stmt) {
+ TODO(toLocation(), "SyncTeamStmt lowering");
+ }
+
+ void genFIR(const Fortran::parser::UnlockStmt &stmt) {
+ TODO(toLocation(), "UnlockStmt lowering");
+ }
+
+ void genFIR(const Fortran::parser::AssignStmt &stmt) {
+ const Fortran::semantics::Symbol &symbol =
+ *std::get<Fortran::parser::Name>(stmt.t).symbol;
+ mlir::Location loc = toLocation();
+ mlir::Value labelValue = builder->createIntegerConstant(
+ loc, genType(symbol), std::get<Fortran::parser::Label>(stmt.t));
+ builder->create<fir::StoreOp>(loc, labelValue, getSymbolAddress(symbol));
+ }
+
+ void genFIR(const Fortran::parser::FormatStmt &) {
+ // do nothing.
+
+ // FORMAT statements have no semantics. They may be lowered if used by a
+ // data transfer statement.
+ }
+
+ void genFIR(const Fortran::parser::PauseStmt &stmt) {
+ genPauseStatement(*this, stmt);
+ }
+
+ // call FAIL IMAGE in runtime
+ void genFIR(const Fortran::parser::FailImageStmt &stmt) {
+ TODO(toLocation(), "FailImageStmt lowering");
+ }
+
+ // call STOP, ERROR STOP in runtime
+ void genFIR(const Fortran::parser::StopStmt &stmt) {
+ genStopStatement(*this, stmt);
+ }
+
+ void genFIR(const Fortran::parser::ReturnStmt &stmt) {
+ Fortran::lower::pft::FunctionLikeUnit *funit =
+ getEval().getOwningProcedure();
+ assert(funit && "not inside main program, function or subroutine");
+ if (funit->isMainProgram()) {
+ genExitRoutine();
+ return;
+ }
+ mlir::Location loc = toLocation();
+ if (stmt.v) {
+ // Alternate return statement - If this is a subroutine where some
+ // alternate entries have alternate returns, but the active entry point
+ // does not, ignore the alternate return value. Otherwise, assign it
+ // to the compiler-generated result variable.
+ const Fortran::semantics::Symbol &symbol = funit->getSubprogramSymbol();
+ if (Fortran::semantics::HasAlternateReturns(symbol)) {
+ Fortran::lower::StatementContext stmtCtx;
+ const Fortran::lower::SomeExpr *expr =
+ Fortran::semantics::GetExpr(*stmt.v);
+ assert(expr && "missing alternate return expression");
+ mlir::Value altReturnIndex = builder->createConvert(
+ loc, builder->getIndexType(), createFIRExpr(loc, expr, stmtCtx));
+ builder->create<fir::StoreOp>(loc, altReturnIndex,
+ getAltReturnResult(symbol));
+ }
+ }
+ // Branch to the last block of the SUBROUTINE, which has the actual return.
+ if (!funit->finalBlock) {
+ mlir::OpBuilder::InsertPoint insPt = builder->saveInsertionPoint();
+ funit->finalBlock = builder->createBlock(&builder->getRegion());
+ builder->restoreInsertionPoint(insPt);
+ }
+ builder->create<mlir::cf::BranchOp>(loc, funit->finalBlock);
+ }
+
+ void genFIR(const Fortran::parser::CycleStmt &) {
+ genFIRBranch(getEval().controlSuccessor->block);
}
- void genFIR(const Fortran::parser::ElsewhereStmt &stmt) {
- implicitIterSpace.append(nullptr);
+ void genFIR(const Fortran::parser::ExitStmt &) {
+ genFIRBranch(getEval().controlSuccessor->block);
}
- void genFIR(const Fortran::parser::EndWhereStmt &) {
- implicitIterSpace.shrinkStack();
+ void genFIR(const Fortran::parser::GotoStmt &) {
+ genFIRBranch(getEval().controlSuccessor->block);
}
- void genFIR(const Fortran::parser::WhereStmt &stmt) {
- Fortran::lower::StatementContext stmtCtx;
- const auto &assign = std::get<Fortran::parser::AssignmentStmt>(stmt.t);
- implicitIterSpace.growStack();
- implicitIterSpace.append(Fortran::semantics::GetExpr(
- std::get<Fortran::parser::LogicalExpr>(stmt.t)));
- genAssignment(*assign.typedAssignment->v);
- implicitIterSpace.shrinkStack();
+ void genFIR(const Fortran::parser::EndDoStmt &) {
+ TODO(toLocation(), "EndDoStmt lowering");
}
- void genFIR(const Fortran::parser::PointerAssignmentStmt &stmt) {
- genAssignment(*stmt.typedAssignment->v);
- }
+ // Nop statements - No code, or code is generated at the construct level.
+ void genFIR(const Fortran::parser::AssociateStmt &) {} // nop
+ void genFIR(const Fortran::parser::CaseStmt &) {} // nop
+ void genFIR(const Fortran::parser::ContinueStmt &) {} // nop
+ void genFIR(const Fortran::parser::ElseIfStmt &) {} // nop
+ void genFIR(const Fortran::parser::ElseStmt &) {} // nop
+ void genFIR(const Fortran::parser::EndAssociateStmt &) {} // nop
+ void genFIR(const Fortran::parser::EndFunctionStmt &) {} // nop
+ void genFIR(const Fortran::parser::EndIfStmt &) {} // nop
+ void genFIR(const Fortran::parser::EndMpSubprogramStmt &) {} // nop
+ void genFIR(const Fortran::parser::EndSelectStmt &) {} // nop
+ void genFIR(const Fortran::parser::EndSubroutineStmt &) {} // nop
+ void genFIR(const Fortran::parser::EntryStmt &) {} // nop
+ void genFIR(const Fortran::parser::IfStmt &) {} // nop
+ void genFIR(const Fortran::parser::IfThenStmt &) {} // nop
- void genFIR(const Fortran::parser::AssignmentStmt &stmt) {
- genAssignment(*stmt.typedAssignment->v);
+ void genFIR(const Fortran::parser::NonLabelDoStmt &) {
+ TODO(toLocation(), "NonLabelDoStmt lowering");
}
- void genFIR(const Fortran::parser::SyncAllStmt &stmt) {
- TODO(toLocation(), "SyncAllStmt lowering");
+ void genFIR(const Fortran::parser::OmpEndLoopDirective &) {
+ TODO(toLocation(), "OmpEndLoopDirective lowering");
}
- void genFIR(const Fortran::parser::SyncImagesStmt &stmt) {
- TODO(toLocation(), "SyncImagesStmt lowering");
+ void genFIR(const Fortran::parser::NamelistStmt &) {
+ TODO(toLocation(), "NamelistStmt lowering");
}
- void genFIR(const Fortran::parser::SyncMemoryStmt &stmt) {
- TODO(toLocation(), "SyncMemoryStmt lowering");
- }
+ /// Generate FIR for the Evaluation `eval`.
+ void genFIR(Fortran::lower::pft::Evaluation &eval,
+ bool unstructuredContext = true) {
+ if (unstructuredContext) {
+ // When transitioning from unstructured to structured code,
+ // the structured code could be a target that starts a new block.
+ maybeStartBlock(eval.isConstruct() && eval.lowerAsStructured()
+ ? eval.getFirstNestedEvaluation().block
+ : eval.block);
+ }
- void genFIR(const Fortran::parser::SyncTeamStmt &stmt) {
- TODO(toLocation(), "SyncTeamStmt lowering");
+ setCurrentEval(eval);
+ setCurrentPosition(eval.position);
+ eval.visit([&](const auto &stmt) { genFIR(stmt); });
+
+ if (unstructuredContext && blockIsUnterminated()) {
+ // Exit from an unstructured IF or SELECT construct block.
+ Fortran::lower::pft::Evaluation *successor{};
+ if (eval.isActionStmt())
+ successor = eval.controlSuccessor;
+ else if (eval.isConstruct() &&
+ eval.getLastNestedEvaluation()
+ .lexicalSuccessor->isIntermediateConstructStmt())
+ successor = eval.constructExit;
+ if (successor && successor->block)
+ genFIRBranch(successor->block);
+ }
}
- void genFIR(const Fortran::parser::UnlockStmt &stmt) {
- TODO(toLocation(), "UnlockStmt lowering");
+ /// Map mlir function block arguments to the corresponding Fortran dummy
+ /// variables. When the result is passed as a hidden argument, the Fortran
+ /// result is also mapped. The symbol map is used to hold this mapping.
+ void mapDummiesAndResults(Fortran::lower::pft::FunctionLikeUnit &funit,
+ const Fortran::lower::CalleeInterface &callee) {
+ assert(builder && "require a builder object at this point");
+ using PassBy = Fortran::lower::CalleeInterface::PassEntityBy;
+ auto mapPassedEntity = [&](const auto arg) -> void {
+ if (arg.passBy == PassBy::AddressAndLength) {
+ // TODO: now that fir call has some attributes regarding character
+ // return, PassBy::AddressAndLength should be retired.
+ mlir::Location loc = toLocation();
+ fir::factory::CharacterExprHelper charHelp{*builder, loc};
+ mlir::Value box =
+ charHelp.createEmboxChar(arg.firArgument, arg.firLength);
+ addSymbol(arg.entity->get(), box);
+ } else {
+ if (arg.entity.has_value()) {
+ addSymbol(arg.entity->get(), arg.firArgument);
+ } else {
+ assert(funit.parentHasHostAssoc());
+ funit.parentHostAssoc().internalProcedureBindings(*this,
+ localSymbols);
+ }
+ }
+ };
+ for (const Fortran::lower::CalleeInterface::PassedEntity &arg :
+ callee.getPassedArguments())
+ mapPassedEntity(arg);
+
+ // Allocate local skeleton instances of dummies from other entry points.
+ // Most of these locals will not survive into final generated code, but
+ // some will. It is illegal to reference them at run time if they do.
+ for (const Fortran::semantics::Symbol *arg :
+ funit.nonUniversalDummyArguments) {
+ if (lookupSymbol(*arg))
+ continue;
+ mlir::Type type = genType(*arg);
+ // TODO: Account for VALUE arguments (and possibly other variants).
+ type = builder->getRefType(type);
+ addSymbol(*arg, builder->create<fir::UndefOp>(toLocation(), type));
+ }
+ if (std::optional<Fortran::lower::CalleeInterface::PassedEntity>
+ passedResult = callee.getPassedResult()) {
+ mapPassedEntity(*passedResult);
+ // FIXME: need to make sure things are OK here. addSymbol may not be OK
+ if (funit.primaryResult &&
+ passedResult->entity->get() != *funit.primaryResult)
+ addSymbol(*funit.primaryResult,
+ getSymbolAddress(passedResult->entity->get()));
+ }
}
- void genFIR(const Fortran::parser::AssignStmt &stmt) {
- const Fortran::semantics::Symbol &symbol =
- *std::get<Fortran::parser::Name>(stmt.t).symbol;
- mlir::Location loc = toLocation();
- mlir::Value labelValue = builder->createIntegerConstant(
- loc, genType(symbol), std::get<Fortran::parser::Label>(stmt.t));
- builder->create<fir::StoreOp>(loc, labelValue, getSymbolAddress(symbol));
+ /// Instantiate variable \p var and add it to the symbol map.
+ /// See ConvertVariable.cpp.
+ void instantiateVar(const Fortran::lower::pft::Variable &var,
+ Fortran::lower::AggregateStoreMap &storeMap) {
+ Fortran::lower::instantiateVariable(*this, var, localSymbols, storeMap);
}
- void genFIR(const Fortran::parser::FormatStmt &) {
- // do nothing.
+ /// Prepare to translate a new function
+ void startNewFunction(Fortran::lower::pft::FunctionLikeUnit &funit) {
+ assert(!builder && "expected nullptr");
+ Fortran::lower::CalleeInterface callee(funit, *this);
+ mlir::FuncOp func = callee.addEntryBlockAndMapArguments();
+ builder = new fir::FirOpBuilder(func, bridge.getKindMap());
+ assert(builder && "FirOpBuilder did not instantiate");
+ builder->setInsertionPointToStart(&func.front());
+ func.setVisibility(mlir::SymbolTable::Visibility::Public);
+
+ mapDummiesAndResults(funit, callee);
+
+ // Note: not storing Variable references because getOrderedSymbolTable
+ // below returns a temporary.
+ llvm::SmallVector<Fortran::lower::pft::Variable> deferredFuncResultList;
+
+ // Backup actual argument for entry character results
+ // with different lengths. It needs to be added to the non
+ // primary results symbol before mapSymbolAttributes is called.
+ Fortran::lower::SymbolBox resultArg;
+ if (std::optional<Fortran::lower::CalleeInterface::PassedEntity>
+ passedResult = callee.getPassedResult())
+ resultArg = lookupSymbol(passedResult->entity->get());
+
+ Fortran::lower::AggregateStoreMap storeMap;
+ // The front-end is currently not adding module variables referenced
+ // in a module procedure as host associated. As a result we need to
+ // instantiate all module variables here if this is a module procedure.
+ // It is likely that the front-end behavior should change here.
+ // This also applies to internal procedures inside module procedures.
+ if (auto *module = Fortran::lower::pft::getAncestor<
+ Fortran::lower::pft::ModuleLikeUnit>(funit))
+ for (const Fortran::lower::pft::Variable &var :
+ module->getOrderedSymbolTable())
+ instantiateVar(var, storeMap);
+
+ mlir::Value primaryFuncResultStorage;
+ for (const Fortran::lower::pft::Variable &var :
+ funit.getOrderedSymbolTable()) {
+ // Always instantiate aggregate storage blocks.
+ if (var.isAggregateStore()) {
+ instantiateVar(var, storeMap);
+ continue;
+ }
+ const Fortran::semantics::Symbol &sym = var.getSymbol();
+ if (funit.parentHasHostAssoc()) {
+ // Never instantiate host associated variables, as they are already
+ // instantiated from an argument tuple. Instead, just bind the symbol to
+ // the reference to the host variable, which must be in the map.
+ const Fortran::semantics::Symbol &ultimate = sym.GetUltimate();
+ if (funit.parentHostAssoc().isAssociated(ultimate)) {
+ Fortran::lower::SymbolBox hostBox =
+ localSymbols.lookupSymbol(ultimate);
+ assert(hostBox && "host association is not in map");
+ localSymbols.addSymbol(sym, hostBox.toExtendedValue());
+ continue;
+ }
+ }
+ if (!sym.IsFuncResult() || !funit.primaryResult) {
+ instantiateVar(var, storeMap);
+ } else if (&sym == funit.primaryResult) {
+ instantiateVar(var, storeMap);
+ primaryFuncResultStorage = getSymbolAddress(sym);
+ } else {
+ deferredFuncResultList.push_back(var);
+ }
+ }
+
+ // If this is a host procedure with host associations, then create the tuple
+ // of pointers for passing to the internal procedures.
+ if (!funit.getHostAssoc().empty())
+ funit.getHostAssoc().hostProcedureBindings(*this, localSymbols);
+
+ /// TODO: should use same mechanism as equivalence?
+ /// One blocking point is character entry returns that need special handling
+ /// since they are not locally allocated but come as argument. CHARACTER(*)
+ /// is not something that fits well with equivalence lowering.
+ for (const Fortran::lower::pft::Variable &altResult :
+ deferredFuncResultList) {
+ if (std::optional<Fortran::lower::CalleeInterface::PassedEntity>
+ passedResult = callee.getPassedResult())
+ addSymbol(altResult.getSymbol(), resultArg.getAddr());
+ Fortran::lower::StatementContext stmtCtx;
+ Fortran::lower::mapSymbolAttributes(*this, altResult, localSymbols,
+ stmtCtx, primaryFuncResultStorage);
+ }
+
+ // Create most function blocks in advance.
+ createEmptyBlocks(funit.evaluationList);
+
+ // Reinstate entry block as the current insertion point.
+ builder->setInsertionPointToEnd(&func.front());
+
+ if (callee.hasAlternateReturns()) {
+ // Create a local temp to hold the alternate return index.
+ // Give it an integer index type and the subroutine name (for dumps).
+ // Attach it to the subroutine symbol in the localSymbols map.
+ // Initialize it to zero, the "fallthrough" alternate return value.
+ const Fortran::semantics::Symbol &symbol = funit.getSubprogramSymbol();
+ mlir::Location loc = toLocation();
+ mlir::Type idxTy = builder->getIndexType();
+ mlir::Value altResult =
+ builder->createTemporary(loc, idxTy, toStringRef(symbol.name()));
+ addSymbol(symbol, altResult);
+ mlir::Value zero = builder->createIntegerConstant(loc, idxTy, 0);
+ builder->create<fir::StoreOp>(loc, zero, altResult);
+ }
- // FORMAT statements have no semantics. They may be lowered if used by a
- // data transfer statement.
+ if (Fortran::lower::pft::Evaluation *alternateEntryEval =
+ funit.getEntryEval())
+ genFIRBranch(alternateEntryEval->lexicalSuccessor->block);
}
- void genFIR(const Fortran::parser::PauseStmt &stmt) {
- genPauseStatement(*this, stmt);
+ /// Create global blocks for the current function. This eliminates the
+ /// distinction between forward and backward targets when generating
+ /// branches. A block is "global" if it can be the target of a GOTO or
+ /// other source code branch. A block that can only be targeted by a
+ /// compiler generated branch is "local". For example, a DO loop preheader
+ /// block containing loop initialization code is global. A loop header
+ /// block, which is the target of the loop back edge, is local. Blocks
+ /// belong to a region. Any block within a nested region must be replaced
+ /// with a block belonging to that region. Branches may not cross region
+ /// boundaries.
+ void createEmptyBlocks(
+ std::list<Fortran::lower::pft::Evaluation> &evaluationList) {
+ mlir::Region *region = &builder->getRegion();
+ for (Fortran::lower::pft::Evaluation &eval : evaluationList) {
+ if (eval.isNewBlock)
+ eval.block = builder->createBlock(region);
+ if (eval.isConstruct() || eval.isDirective()) {
+ if (eval.lowerAsUnstructured()) {
+ createEmptyBlocks(eval.getNestedEvaluations());
+ } else if (eval.hasNestedEvaluations()) {
+ // A structured construct that is a target starts a new block.
+ Fortran::lower::pft::Evaluation &constructStmt =
+ eval.getFirstNestedEvaluation();
+ if (constructStmt.isNewBlock)
+ constructStmt.block = builder->createBlock(region);
+ }
+ }
+ }
}
- void genFIR(const Fortran::parser::FailImageStmt &stmt) {
- TODO(toLocation(), "FailImageStmt lowering");
+ /// Return the predicate: "current block does not have a terminator branch".
+ bool blockIsUnterminated() {
+ mlir::Block *currentBlock = builder->getBlock();
+ return currentBlock->empty() ||
+ !currentBlock->back().hasTrait<mlir::OpTrait::IsTerminator>();
}
- // call STOP, ERROR STOP in runtime
- void genFIR(const Fortran::parser::StopStmt &stmt) {
- genStopStatement(*this, stmt);
+ /// Unconditionally switch code insertion to a new block.
+ void startBlock(mlir::Block *newBlock) {
+ assert(newBlock && "missing block");
+ // Default termination for the current block is a fallthrough branch to
+ // the new block.
+ if (blockIsUnterminated())
+ genFIRBranch(newBlock);
+ // Some blocks may be re/started more than once, and might not be empty.
+ // If the new block already has (only) a terminator, set the insertion
+ // point to the start of the block. Otherwise set it to the end.
+ builder->setInsertionPointToStart(newBlock);
+ if (blockIsUnterminated())
+ builder->setInsertionPointToEnd(newBlock);
}
- void genFIR(const Fortran::parser::ReturnStmt &stmt) {
- Fortran::lower::pft::FunctionLikeUnit *funit =
- getEval().getOwningProcedure();
- assert(funit && "not inside main program, function or subroutine");
- if (funit->isMainProgram()) {
- genExitRoutine();
- return;
- }
- mlir::Location loc = toLocation();
- if (stmt.v) {
- // Alternate return statement - If this is a subroutine where some
- // alternate entries have alternate returns, but the active entry point
- // does not, ignore the alternate return value. Otherwise, assign it
- // to the compiler-generated result variable.
- const Fortran::semantics::Symbol &symbol = funit->getSubprogramSymbol();
- if (Fortran::semantics::HasAlternateReturns(symbol)) {
- Fortran::lower::StatementContext stmtCtx;
- const Fortran::lower::SomeExpr *expr =
- Fortran::semantics::GetExpr(*stmt.v);
- assert(expr && "missing alternate return expression");
- mlir::Value altReturnIndex = builder->createConvert(
- loc, builder->getIndexType(), createFIRExpr(loc, expr, stmtCtx));
- builder->create<fir::StoreOp>(loc, altReturnIndex,
- getAltReturnResult(symbol));
- }
- }
- // Branch to the last block of the SUBROUTINE, which has the actual return.
- if (!funit->finalBlock) {
- mlir::OpBuilder::InsertPoint insPt = builder->saveInsertionPoint();
- funit->finalBlock = builder->createBlock(&builder->getRegion());
- builder->restoreInsertionPoint(insPt);
- }
- builder->create<mlir::cf::BranchOp>(loc, funit->finalBlock);
+ /// Conditionally switch code insertion to a new block.
+ void maybeStartBlock(mlir::Block *newBlock) {
+ if (newBlock)
+ startBlock(newBlock);
}
- void genFIR(const Fortran::parser::CycleStmt &) {
- TODO(toLocation(), "CycleStmt lowering");
+ /// Emit return and cleanup after the function has been translated.
+ void endNewFunction(Fortran::lower::pft::FunctionLikeUnit &funit) {
+ setCurrentPosition(Fortran::lower::pft::stmtSourceLoc(funit.endStmt));
+ if (funit.isMainProgram())
+ genExitRoutine();
+ else
+ genFIRProcedureExit(funit, funit.getSubprogramSymbol());
+ funit.finalBlock = nullptr;
+ LLVM_DEBUG(llvm::dbgs() << "*** Lowering result:\n\n"
+ << *builder->getFunction() << '\n');
+ // FIXME: Simplification should happen in a normal pass, not here.
+ mlir::IRRewriter rewriter(*builder);
+ (void)mlir::simplifyRegions(rewriter,
+ {builder->getRegion()}); // remove dead code
+ delete builder;
+ builder = nullptr;
+ hostAssocTuple = mlir::Value{};
+ localSymbols.clear();
}
- void genFIR(const Fortran::parser::ExitStmt &) {
- TODO(toLocation(), "ExitStmt lowering");
+ /// Helper to generate GlobalOps when the builder is not positioned in any
+ /// region block. This is required because the FirOpBuilder assumes it is
+ /// always positioned inside a region block when creating globals; the easiest
+ /// way to comply is to create a dummy function and to throw it away afterwards.
+ void createGlobalOutsideOfFunctionLowering(
+ const std::function<void()> &createGlobals) {
+ // FIXME: get rid of the bogus function context and instantiate the
+ // globals directly into the module.
+ mlir::MLIRContext *context = &getMLIRContext();
+ mlir::FuncOp func = fir::FirOpBuilder::createFunction(
+ mlir::UnknownLoc::get(context), getModuleOp(),
+ fir::NameUniquer::doGenerated("Sham"),
+ mlir::FunctionType::get(context, llvm::None, llvm::None));
+ func.addEntryBlock();
+ builder = new fir::FirOpBuilder(func, bridge.getKindMap());
+ createGlobals();
+ if (mlir::Region *region = func.getCallableRegion())
+ region->dropAllReferences();
+ func.erase();
+ delete builder;
+ builder = nullptr;
+ localSymbols.clear();
}
-
- void genFIR(const Fortran::parser::GotoStmt &) {
- genFIRBranch(getEval().controlSuccessor->block);
+ /// Instantiate the data from a BLOCK DATA unit.
+ void lowerBlockData(Fortran::lower::pft::BlockDataUnit &bdunit) {
+ createGlobalOutsideOfFunctionLowering([&]() {
+ Fortran::lower::AggregateStoreMap fakeMap;
+ for (const auto &[_, sym] : bdunit.symTab) {
+ if (sym->has<Fortran::semantics::ObjectEntityDetails>()) {
+ Fortran::lower::pft::Variable var(*sym, true);
+ instantiateVar(var, fakeMap);
+ }
+ }
+ });
}
- void genFIR(const Fortran::parser::ElseIfStmt &) {
- TODO(toLocation(), "ElseIfStmt lowering");
+ /// Lower a procedure (nest).
+ void lowerFunc(Fortran::lower::pft::FunctionLikeUnit &funit) {
+ if (!funit.isMainProgram()) {
+ const Fortran::semantics::Symbol &procSymbol =
+ funit.getSubprogramSymbol();
+ if (procSymbol.owner().IsSubmodule()) {
+ TODO(toLocation(), "support submodules");
+ return;
+ }
+ }
+ setCurrentPosition(funit.getStartingSourceLoc());
+ for (int entryIndex = 0, last = funit.entryPointList.size();
+ entryIndex < last; ++entryIndex) {
+ funit.setActiveEntry(entryIndex);
+ startNewFunction(funit); // the entry point for lowering this procedure
+ for (Fortran::lower::pft::Evaluation &eval : funit.evaluationList)
+ genFIR(eval);
+ endNewFunction(funit);
+ }
+ funit.setActiveEntry(0);
+ for (Fortran::lower::pft::FunctionLikeUnit &f : funit.nestedFunctions)
+ lowerFunc(f); // internal procedure
}
- void genFIR(const Fortran::parser::ElseStmt &) {
- TODO(toLocation(), "ElseStmt lowering");
+ /// Lower module variable definitions to fir::globalOp and OpenMP/OpenACC
+ /// declarative construct.
+ void lowerModuleDeclScope(Fortran::lower::pft::ModuleLikeUnit &mod) {
+ setCurrentPosition(mod.getStartingSourceLoc());
+ createGlobalOutsideOfFunctionLowering([&]() {
+ for (const Fortran::lower::pft::Variable &var :
+ mod.getOrderedSymbolTable()) {
+ // Only define the variables owned by this module.
+ const Fortran::semantics::Scope *owningScope = var.getOwningScope();
+ if (!owningScope || mod.getScope() == *owningScope)
+ Fortran::lower::defineModuleVariable(*this, var);
+ }
+ for (auto &eval : mod.evaluationList)
+ genFIR(eval);
+ });
}
- void genFIR(const Fortran::parser::EndDoStmt &) {
- TODO(toLocation(), "EndDoStmt lowering");
+ /// Lower functions contained in a module.
+ void lowerMod(Fortran::lower::pft::ModuleLikeUnit &mod) {
+ for (Fortran::lower::pft::FunctionLikeUnit &f : mod.nestedFunctions)
+ lowerFunc(f);
}
- void genFIR(const Fortran::parser::EndMpSubprogramStmt &) {
- TODO(toLocation(), "EndMpSubprogramStmt lowering");
+ void setCurrentPosition(const Fortran::parser::CharBlock &position) {
+ if (position != Fortran::parser::CharBlock{})
+ currentPosition = position;
}
- // Nop statements - No code, or code is generated at the construct level.
- void genFIR(const Fortran::parser::AssociateStmt &) {} // nop
- void genFIR(const Fortran::parser::CaseStmt &) {} // nop
- void genFIR(const Fortran::parser::ContinueStmt &) {} // nop
- void genFIR(const Fortran::parser::EndAssociateStmt &) {} // nop
- void genFIR(const Fortran::parser::EndFunctionStmt &) {} // nop
- void genFIR(const Fortran::parser::EndIfStmt &) {} // nop
- void genFIR(const Fortran::parser::EndSelectStmt &) {} // nop
- void genFIR(const Fortran::parser::EndSubroutineStmt &) {} // nop
- void genFIR(const Fortran::parser::EntryStmt &) {} // nop
-
- void genFIR(const Fortran::parser::IfStmt &) {
- TODO(toLocation(), "IfStmt lowering");
+ /// Set current position at the location of \p parseTreeNode. Note that the
+ /// position is updated automatically when visiting statements, but not when
+ /// entering higher level nodes like constructs or procedures. This helper is
+ /// intended to cover the latter cases.
+ template <typename A>
+ void setCurrentPositionAt(const A &parseTreeNode) {
+ setCurrentPosition(Fortran::parser::FindSourceLocation(parseTreeNode));
}
- void genFIR(const Fortran::parser::IfThenStmt &) {
- TODO(toLocation(), "IfThenStmt lowering");
- }
+ //===--------------------------------------------------------------------===//
+ // Utility methods
+ //===--------------------------------------------------------------------===//
- void genFIR(const Fortran::parser::NonLabelDoStmt &) {
- TODO(toLocation(), "NonLabelDoStmt lowering");
+ /// Convert a parser CharBlock to a Location
+ mlir::Location toLocation(const Fortran::parser::CharBlock &cb) {
+ return genLocation(cb);
}
- void genFIR(const Fortran::parser::OmpEndLoopDirective &) {
- TODO(toLocation(), "OmpEndLoopDirective lowering");
+ mlir::Location toLocation() { return toLocation(currentPosition); }
+ void setCurrentEval(Fortran::lower::pft::Evaluation &eval) {
+ evalPtr = &eval;
}
-
- void genFIR(const Fortran::parser::NamelistStmt &) {
- TODO(toLocation(), "NamelistStmt lowering");
+ Fortran::lower::pft::Evaluation &getEval() {
+ assert(evalPtr);
+ return *evalPtr;
}
- /// Generate FIR for the Evaluation `eval`.
- void genFIR(Fortran::lower::pft::Evaluation &eval,
- bool unstructuredContext = true) {
- if (unstructuredContext) {
- // When transitioning from unstructured to structured code,
- // the structured code could be a target that starts a new block.
- maybeStartBlock(eval.isConstruct() && eval.lowerAsStructured()
- ? eval.getFirstNestedEvaluation().block
- : eval.block);
- }
-
- setCurrentEval(eval);
- setCurrentPosition(eval.position);
- eval.visit([&](const auto &stmt) { genFIR(stmt); });
-
- if (unstructuredContext && blockIsUnterminated()) {
- // Exit from an unstructured IF or SELECT construct block.
- Fortran::lower::pft::Evaluation *successor{};
- if (eval.isActionStmt())
- successor = eval.controlSuccessor;
- else if (eval.isConstruct() &&
- eval.getLastNestedEvaluation()
- .lexicalSuccessor->isIntermediateConstructStmt())
- successor = eval.constructExit;
- if (successor && successor->block)
- genFIRBranch(successor->block);
- }
+ std::optional<Fortran::evaluate::Shape>
+ getShape(const Fortran::lower::SomeExpr &expr) {
+ return Fortran::evaluate::GetShape(foldingContext, expr);
}
//===--------------------------------------------------------------------===//
});
}
+ void createRuntimeTypeInfoGlobals() {}
+
//===--------------------------------------------------------------------===//
Fortran::lower::LoweringBridge &bridge;
Fortran::parser::CharBlock currentPosition;
RuntimeTypeInfoConverter runtimeTypeInfoConverter;
- /// Tuple of host assoicated variables.
- mlir::Value hostAssocTuple;
+ /// WHERE statement/construct mask expression stack.
Fortran::lower::ImplicitIterSpace implicitIterSpace;
+
+ /// FORALL context
Fortran::lower::ExplicitIterSpace explicitIterSpace;
+
+ /// Tuple of host associated variables.
+ mlir::Value hostAssocTuple;
+
+ std::size_t constructDepth = 0;
};
} // namespace
converter.run(*pft);
}
+void Fortran::lower::LoweringBridge::parseSourceFile(llvm::SourceMgr &srcMgr) {
+ mlir::OwningOpRef<mlir::ModuleOp> owningRef =
+ mlir::parseSourceFile<mlir::ModuleOp>(srcMgr, &context);
+ module.reset(new mlir::ModuleOp(owningRef.get().getOperation()));
+ owningRef.release();
+}
+
Fortran::lower::LoweringBridge::LoweringBridge(
mlir::MLIRContext &context,
const Fortran::common::IntrinsicTypeDefaultKinds &defaultKinds,
default:
break;
}
- if (!diag.getLocation().isa<UnknownLoc>())
+ if (!diag.getLocation().isa<mlir::UnknownLoc>())
os << diag.getLocation() << ": ";
os << diag << '\n';
os.flush();
module = std::make_unique<mlir::ModuleOp>(
mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)));
assert(module.get() && "module was not created");
- fir::setTargetTriple(getModule(), triple);
- fir::setKindMapping(getModule(), kindMap);
+ fir::setTargetTriple(*module.get(), triple);
+ fir::setKindMapping(*module.get(), kindMap);
}
ExprVisitor visitor) const {
// Walk directly the result symbol shape (the characteristic shape may contain
// descriptor inquiries to it that would fail to lower on the caller side).
- const Fortran::semantics::Symbol *interfaceSymbol =
- procRef.proc().GetInterfaceSymbol();
- if (interfaceSymbol) {
- const Fortran::semantics::Symbol &result =
- interfaceSymbol->get<Fortran::semantics::SubprogramDetails>().result();
+ const Fortran::semantics::SubprogramDetails *interfaceDetails =
+ getInterfaceDetails();
+ if (interfaceDetails) {
+ const Fortran::semantics::Symbol &result = interfaceDetails->result();
if (const auto *objectDetails =
result.detailsIf<Fortran::semantics::ObjectEntityDetails>())
if (objectDetails->shape().IsExplicitShape())
const std::optional<Fortran::evaluate::characteristics::FunctionResult>
&result = characteristic->functionResult;
if (!result || result->CanBeReturnedViaImplicitInterface() ||
- !procRef.proc().GetInterfaceSymbol())
+ !getInterfaceDetails())
return false;
bool allResultSpecExprConstant = true;
auto visitor = [&](const Fortran::lower::SomeExpr &e) {
mlir::Value Fortran::lower::CallerInterface::getArgumentValue(
const semantics::Symbol &sym) const {
mlir::Location loc = converter.getCurrentLocation();
- const Fortran::semantics::Symbol *iface = procRef.proc().GetInterfaceSymbol();
- if (!iface)
+ const Fortran::semantics::SubprogramDetails *ifaceDetails =
+ getInterfaceDetails();
+ if (!ifaceDetails)
fir::emitFatalError(
loc, "mapping actual and dummy arguments requires an interface");
const std::vector<Fortran::semantics::Symbol *> &dummies =
- iface->get<semantics::SubprogramDetails>().dummyArgs();
+ ifaceDetails->dummyArgs();
auto it = std::find(dummies.begin(), dummies.end(), &sym);
if (it == dummies.end())
fir::emitFatalError(loc, "symbol is not a dummy in this call");
const Fortran::semantics::Symbol &
Fortran::lower::CallerInterface::getResultSymbol() const {
mlir::Location loc = converter.getCurrentLocation();
- const Fortran::semantics::Symbol *iface = procRef.proc().GetInterfaceSymbol();
- if (!iface)
+ const Fortran::semantics::SubprogramDetails *ifaceDetails =
+ getInterfaceDetails();
+ if (!ifaceDetails)
fir::emitFatalError(
loc, "mapping actual and dummy arguments requires an interface");
- return iface->get<semantics::SubprogramDetails>().result();
+ return ifaceDetails->result();
+}
+
+const Fortran::semantics::SubprogramDetails *
+Fortran::lower::CallerInterface::getInterfaceDetails() const {
+ if (const Fortran::semantics::Symbol *iface =
+ procRef.proc().GetInterfaceSymbol())
+ return iface->GetUltimate()
+ .detailsIf<Fortran::semantics::SubprogramDetails>();
+ return nullptr;
}
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
#include "flang/Lower/ConvertExpr.h"
+#include "flang/Common/default-kinds.h"
+#include "flang/Common/unwrap.h"
#include "flang/Evaluate/fold.h"
+#include "flang/Evaluate/real.h"
#include "flang/Evaluate/traverse.h"
-#include "flang/Lower/AbstractConverter.h"
#include "flang/Lower/Allocatable.h"
+#include "flang/Lower/Bridge.h"
#include "flang/Lower/BuiltinModules.h"
#include "flang/Lower/CallInterface.h"
+#include "flang/Lower/Coarray.h"
#include "flang/Lower/ComponentPath.h"
#include "flang/Lower/ConvertType.h"
#include "flang/Lower/ConvertVariable.h"
#include "flang/Lower/DumpEvaluateExpr.h"
#include "flang/Lower/IntrinsicCall.h"
#include "flang/Lower/Mangler.h"
-#include "flang/Lower/StatementContext.h"
-#include "flang/Lower/SymbolMap.h"
+#include "flang/Lower/Runtime.h"
+#include "flang/Lower/Support/Utils.h"
#include "flang/Lower/Todo.h"
#include "flang/Optimizer/Builder/Character.h"
#include "flang/Optimizer/Builder/Complex.h"
#include "flang/Optimizer/Builder/Factory.h"
-#include "flang/Optimizer/Builder/LowLevelIntrinsics.h"
-#include "flang/Optimizer/Builder/MutableBox.h"
#include "flang/Optimizer/Builder/Runtime/Character.h"
#include "flang/Optimizer/Builder/Runtime/RTBuilder.h"
#include "flang/Optimizer/Builder/Runtime/Ragged.h"
+#include "flang/Optimizer/Dialect/FIRAttr.h"
+#include "flang/Optimizer/Dialect/FIRDialect.h"
#include "flang/Optimizer/Dialect/FIROpsSupport.h"
-#include "flang/Optimizer/Support/Matcher.h"
+#include "flang/Optimizer/Support/FatalError.h"
#include "flang/Semantics/expression.h"
#include "flang/Semantics/symbol.h"
#include "flang/Semantics/tools.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
#define DEBUG_TYPE "flang-lower-expr"
return builder.createRealConstant(getLoc(), fltTy, value);
}
+ mlir::Type getSomeKindInteger() { return builder.getIndexType(); }
+
+ mlir::FuncOp getFunction(llvm::StringRef name, mlir::FunctionType funTy) {
+ if (mlir::FuncOp func = builder.getNamedFunction(name))
+ return func;
+ return builder.createFunction(getLoc(), name, funTy);
+ }
+
template <typename OpTy>
mlir::Value createCompareOp(mlir::arith::CmpIPredicate pred,
const ExtValue &left, const ExtValue &right) {
}
ExtValue genval(const Fortran::evaluate::BOZLiteralConstant &) {
- TODO(getLoc(), "genval BOZ");
+ TODO(getLoc(), "BOZ");
}
/// Return indirection to function designated in ProcedureDesignator.
}
ExtValue genval(const Fortran::evaluate::TypeParamInquiry &) {
- TODO(getLoc(), "genval TypeParamInquiry");
+ TODO(getLoc(), "type parameter inquiry");
+ }
+
+ mlir::Value extractComplexPart(mlir::Value cplx, bool isImagPart) {
+ return fir::factory::Complex{builder, getLoc()}.extractComplexPart(
+ cplx, isImagPart);
}
template <int KIND>
ExtValue genval(const Fortran::evaluate::ComplexComponent<KIND> &part) {
- TODO(getLoc(), "genval ComplexComponent");
+ return extractComplexPart(genunbox(part.left()), part.isImaginaryPart);
}
template <int KIND>
mlir::Value zero = genIntegerConstant<KIND>(builder.getContext(), 0);
return builder.create<mlir::arith::SubIOp>(getLoc(), zero, input);
}
-
template <int KIND>
ExtValue genval(const Fortran::evaluate::Negate<Fortran::evaluate::Type<
Fortran::common::TypeCategory::Real, KIND>> &op) {
ExtValue
genval(const Fortran::evaluate::Extremum<Fortran::evaluate::Type<TC, KIND>>
&op) {
- TODO(getLoc(), "genval Extremum<TC, KIND>");
+ mlir::Value lhs = genunbox(op.left());
+ mlir::Value rhs = genunbox(op.right());
+ switch (op.ordering) {
+ case Fortran::evaluate::Ordering::Greater:
+ return Fortran::lower::genMax(builder, getLoc(),
+ llvm::ArrayRef<mlir::Value>{lhs, rhs});
+ case Fortran::evaluate::Ordering::Less:
+ return Fortran::lower::genMin(builder, getLoc(),
+ llvm::ArrayRef<mlir::Value>{lhs, rhs});
+ case Fortran::evaluate::Ordering::Equal:
+ llvm_unreachable("Equal is not a valid ordering in this context");
+ }
+ llvm_unreachable("unknown ordering");
}
// Change the dynamic length information without actually changing the
template <int KIND>
ExtValue genval(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
Fortran::common::TypeCategory::Complex, KIND>> &op) {
- TODO(getLoc(), "genval complex comparison");
+ return createFltCmpOp<fir::CmpcOp>(op, translateFloatRelational(op.opr));
}
template <int KIND>
ExtValue genval(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
genval(const Fortran::evaluate::Convert<Fortran::evaluate::Type<TC1, KIND>,
TC2> &convert) {
mlir::Type ty = converter.genType(TC1, KIND);
- mlir::Value operand = genunbox(convert.left());
- return builder.convertWithSemantics(getLoc(), ty, operand);
+ auto fromExpr = genval(convert.left());
+ auto loc = getLoc();
+ return fromExpr.match(
+ [&](const fir::CharBoxValue &boxchar) -> ExtValue {
+ if constexpr (TC1 == Fortran::common::TypeCategory::Character &&
+ TC2 == TC1) {
+ // Use char_convert. Each code point is translated from a
+ // narrower/wider encoding to the target encoding. For example, 'A'
+ // may be translated from 0x41 : i8 to 0x0041 : i16. The symbol
+ // for euro (0x20AC : i16) may be translated from a wide character
+ // to "0xE2 0x82 0xAC" : UTF-8.
+ mlir::Value bufferSize = boxchar.getLen();
+ auto kindMap = builder.getKindMap();
+ auto fromBits = kindMap.getCharacterBitsize(
+ fir::unwrapRefType(boxchar.getAddr().getType())
+ .cast<fir::CharacterType>()
+ .getFKind());
+ auto toBits = kindMap.getCharacterBitsize(
+ ty.cast<fir::CharacterType>().getFKind());
+ if (toBits < fromBits) {
+ // Scale by relative ratio to give a buffer of the same length.
+ auto ratio = builder.createIntegerConstant(
+ loc, bufferSize.getType(), fromBits / toBits);
+ bufferSize =
+ builder.create<mlir::arith::MulIOp>(loc, bufferSize, ratio);
+ }
+ auto dest = builder.create<fir::AllocaOp>(
+ loc, ty, mlir::ValueRange{bufferSize});
+ builder.create<fir::CharConvertOp>(loc, boxchar.getAddr(),
+ boxchar.getLen(), dest);
+ return fir::CharBoxValue{dest, boxchar.getLen()};
+ } else {
+ fir::emitFatalError(
+ loc, "unsupported evaluate::Convert between CHARACTER type "
+ "category and non-CHARACTER category");
+ }
+ },
+ [&](const fir::UnboxedValue &value) -> ExtValue {
+ return builder.convertWithSemantics(loc, ty, value);
+ },
+ [&](auto &) -> ExtValue {
+ fir::emitFatalError(loc, "unsupported evaluate::Convert");
+ });
}
template <typename A>
ExtValue genval(const Fortran::evaluate::Parentheses<A> &op) {
- TODO(getLoc(), "genval parentheses<A>");
+ ExtValue input = genval(op.left());
+ mlir::Value base = fir::getBase(input);
+ mlir::Value newBase =
+ builder.create<fir::NoReassocOp>(getLoc(), base.getType(), base);
+ return fir::substBase(input, newBase);
}
template <int KIND>
return genScalarLit<TC, KIND>(opt.value());
}
}
-
fir::ExtendedValue genval(
const Fortran::evaluate::Constant<Fortran::evaluate::SomeDerived> &con) {
if (con.Rank() > 0)
template <typename A>
ExtValue genval(const Fortran::evaluate::ArrayConstructor<A> &) {
- TODO(getLoc(), "genval ArrayConstructor<A>");
+ fir::emitFatalError(getLoc(),
+ "array constructor: lowering should not reach here");
}
ExtValue gen(const Fortran::evaluate::ComplexPart &x) {
- TODO(getLoc(), "gen ComplexPart");
+ mlir::Location loc = getLoc();
+ auto idxTy = builder.getI32Type();
+ ExtValue exv = gen(x.complex());
+ mlir::Value base = fir::getBase(exv);
+ fir::factory::Complex helper{builder, loc};
+ mlir::Type eleTy =
+ helper.getComplexPartType(fir::dyn_cast_ptrEleTy(base.getType()));
+ mlir::Value offset = builder.createIntegerConstant(
+ loc, idxTy,
+ x.part() == Fortran::evaluate::ComplexPart::Part::RE ? 0 : 1);
+ mlir::Value result = builder.create<fir::CoordinateOp>(
+ loc, builder.getRefType(eleTy), base, mlir::ValueRange{offset});
+ return {result};
}
ExtValue genval(const Fortran::evaluate::ComplexPart &x) {
- TODO(getLoc(), "genval ComplexPart");
+ return genLoad(gen(x));
}
/// Reference to a substring.
}
fir::emitFatalError(getLoc(), "subscript triple notation is not scalar");
}
-
ExtValue genSubscript(const Fortran::evaluate::Subscript &subs) {
return genval(subs);
}
static Fortran::evaluate::DataRef const *
reverseComponents(const Fortran::evaluate::Component &cmpt,
std::list<const Fortran::evaluate::Component *> &list) {
- if (!cmpt.GetLastSymbol().test(
- Fortran::semantics::Symbol::Flag::ParentComp))
+ if (!getLastSym(cmpt).test(Fortran::semantics::Symbol::Flag::ParentComp))
list.push_front(&cmpt);
return std::visit(
Fortran::common::visitors{
[&](const Fortran::evaluate::Component &x) {
- if (Fortran::semantics::IsAllocatableOrPointer(x.GetLastSymbol()))
+ if (Fortran::semantics::IsAllocatableOrPointer(getLastSym(x)))
return &cmpt.base();
return reverseComponents(x, list);
},
// FIXME: need to thread the LEN type parameters here.
for (const Fortran::evaluate::Component *field : list) {
auto recTy = ty.cast<fir::RecordType>();
- const Fortran::semantics::Symbol &sym = field->GetLastSymbol();
+ const Fortran::semantics::Symbol &sym = getLastSym(*field);
llvm::StringRef name = toStringRef(sym.name());
coorArgs.push_back(builder.create<fir::FieldIndexOp>(
loc, fldTy, name, recTy, fir::getTypeParams(obj)));
return genLoad(gen(cmpt));
}
+ // Determine the result type after removing `dims` dimensions from the array
+ // type `arrTy`
+ mlir::Type genSubType(mlir::Type arrTy, unsigned dims) {
+ mlir::Type unwrapTy = fir::dyn_cast_ptrOrBoxEleTy(arrTy);
+ assert(unwrapTy && "must be a pointer or box type");
+ auto seqTy = unwrapTy.cast<fir::SequenceType>();
+ llvm::ArrayRef<int64_t> shape = seqTy.getShape();
+ assert(shape.size() > 0 && "removing columns for sequence sans shape");
+ assert(dims <= shape.size() && "removing more columns than exist");
+ fir::SequenceType::Shape newBnds;
+ // follow Fortran semantics and remove columns (from right)
+ std::size_t e = shape.size() - dims;
+ for (decltype(e) i = 0; i < e; ++i)
+ newBnds.push_back(shape[i]);
+ if (!newBnds.empty())
+ return fir::SequenceType::get(newBnds, seqTy.getEleTy());
+ return seqTy.getEleTy();
+ }
+
+ // Generate the code for a Bound value.
ExtValue genval(const Fortran::semantics::Bound &bound) {
- TODO(getLoc(), "genval Bound");
- }
-
- /// Return lower bounds of \p box in dimension \p dim. The returned value
- /// has type \ty.
- mlir::Value getLBound(const ExtValue &box, unsigned dim, mlir::Type ty) {
- assert(box.rank() > 0 && "must be an array");
- mlir::Location loc = getLoc();
- mlir::Value one = builder.createIntegerConstant(loc, ty, 1);
- mlir::Value lb = fir::factory::readLowerBound(builder, loc, box, dim, one);
- return builder.createConvert(loc, ty, lb);
+ if (bound.isExplicit()) {
+ Fortran::semantics::MaybeSubscriptIntExpr sub = bound.GetExplicit();
+ if (sub.has_value())
+ return genval(*sub);
+ return genIntegerConstant<8>(builder.getContext(), 1);
+ }
+ TODO(getLoc(), "non explicit semantics::Bound lowering");
}
static bool isSlice(const Fortran::evaluate::ArrayRef &aref) {
return genCoordinateOp(base, aref);
}
+ /// Return lower bounds of \p box in dimension \p dim. The returned value
+ /// has type \ty.
+ mlir::Value getLBound(const ExtValue &box, unsigned dim, mlir::Type ty) {
+ assert(box.rank() > 0 && "must be an array");
+ mlir::Location loc = getLoc();
+ mlir::Value one = builder.createIntegerConstant(loc, ty, 1);
+ mlir::Value lb = fir::factory::readLowerBound(builder, loc, box, dim, one);
+ return builder.createConvert(loc, ty, lb);
+ }
+
ExtValue genval(const Fortran::evaluate::ArrayRef &aref) {
return genLoad(gen(aref));
}
ExtValue gen(const Fortran::evaluate::CoarrayRef &coref) {
- TODO(getLoc(), "gen CoarrayRef");
+ return Fortran::lower::CoarrayExprHelper{converter, getLoc(), symMap}
+ .genAddr(coref);
}
+
ExtValue genval(const Fortran::evaluate::CoarrayRef &coref) {
- TODO(getLoc(), "genval CoarrayRef");
+ return Fortran::lower::CoarrayExprHelper{converter, getLoc(), symMap}
+ .genValue(coref);
}
template <typename A>
return placeScalarValueInMemory(builder, getLoc(), retVal, resultType);
}
+ /// Helper to lower intrinsic arguments for inquiry intrinsic.
+ ExtValue
+ lowerIntrinsicArgumentAsInquired(const Fortran::lower::SomeExpr &expr) {
+ if (Fortran::evaluate::IsAllocatableOrPointerObject(
+ expr, converter.getFoldingContext()))
+ return genMutableBoxValue(expr);
+      // Do not create temps for array sections whose properties only need to be
+      // inquired: create a descriptor that will be inquired.
+ if (Fortran::evaluate::IsVariable(expr) && isArray(expr) &&
+ !Fortran::evaluate::UnwrapWholeSymbolOrComponentDataRef(expr))
+ return lowerIntrinsicArgumentAsBox(expr);
+ return gen(expr);
+ }
+
+ /// Helper to lower intrinsic arguments to a fir::BoxValue.
+ /// It preserves all the non default lower bounds/non deferred length
+ /// parameter information.
+ ExtValue lowerIntrinsicArgumentAsBox(const Fortran::lower::SomeExpr &expr) {
+ mlir::Location loc = getLoc();
+ ExtValue exv = genBoxArg(expr);
+ mlir::Value box = builder.createBox(loc, exv);
+ return fir::BoxValue(
+ box, fir::factory::getNonDefaultLowerBounds(builder, loc, exv),
+ fir::factory::getNonDeferredLengthParams(exv));
+ }
+
+ /// Generate a call to an intrinsic function.
+ ExtValue
+ genIntrinsicRef(const Fortran::evaluate::ProcedureRef &procRef,
+ const Fortran::evaluate::SpecificIntrinsic &intrinsic,
+ llvm::Optional<mlir::Type> resultType) {
+ llvm::SmallVector<ExtValue> operands;
+
+ llvm::StringRef name = intrinsic.name;
+ mlir::Location loc = getLoc();
+ if (Fortran::lower::intrinsicRequiresCustomOptionalHandling(
+ procRef, intrinsic, converter)) {
+ using ExvAndPresence = std::pair<ExtValue, llvm::Optional<mlir::Value>>;
+ llvm::SmallVector<ExvAndPresence, 4> operands;
+ auto prepareOptionalArg = [&](const Fortran::lower::SomeExpr &expr) {
+ ExtValue optionalArg = lowerIntrinsicArgumentAsInquired(expr);
+ mlir::Value isPresent =
+ genActualIsPresentTest(builder, loc, optionalArg);
+ operands.emplace_back(optionalArg, isPresent);
+ };
+ auto prepareOtherArg = [&](const Fortran::lower::SomeExpr &expr) {
+ operands.emplace_back(genval(expr), llvm::None);
+ };
+ Fortran::lower::prepareCustomIntrinsicArgument(
+ procRef, intrinsic, resultType, prepareOptionalArg, prepareOtherArg,
+ converter);
+
+ auto getArgument = [&](std::size_t i) -> ExtValue {
+ if (fir::conformsWithPassByRef(
+ fir::getBase(operands[i].first).getType()))
+ return genLoad(operands[i].first);
+ return operands[i].first;
+ };
+ auto isPresent = [&](std::size_t i) -> llvm::Optional<mlir::Value> {
+ return operands[i].second;
+ };
+ return Fortran::lower::lowerCustomIntrinsic(
+ builder, loc, name, resultType, isPresent, getArgument,
+ operands.size(), stmtCtx);
+ }
+
+ const Fortran::lower::IntrinsicArgumentLoweringRules *argLowering =
+ Fortran::lower::getIntrinsicArgumentLowering(name);
+ for (const auto &[arg, dummy] :
+ llvm::zip(procRef.arguments(),
+ intrinsic.characteristics.value().dummyArguments)) {
+ auto *expr = Fortran::evaluate::UnwrapExpr<Fortran::lower::SomeExpr>(arg);
+ if (!expr) {
+ // Absent optional.
+ operands.emplace_back(Fortran::lower::getAbsentIntrinsicArgument());
+ continue;
+ }
+ if (!argLowering) {
+ // No argument lowering instruction, lower by value.
+ operands.emplace_back(genval(*expr));
+ continue;
+ }
+ // Ad-hoc argument lowering handling.
+ Fortran::lower::ArgLoweringRule argRules =
+ Fortran::lower::lowerIntrinsicArgumentAs(loc, *argLowering,
+ dummy.name);
+ if (argRules.handleDynamicOptional &&
+ Fortran::evaluate::MayBePassedAsAbsentOptional(
+ *expr, converter.getFoldingContext())) {
+ ExtValue optional = lowerIntrinsicArgumentAsInquired(*expr);
+ mlir::Value isPresent = genActualIsPresentTest(builder, loc, optional);
+ switch (argRules.lowerAs) {
+ case Fortran::lower::LowerIntrinsicArgAs::Value:
+ operands.emplace_back(
+ genOptionalValue(builder, loc, optional, isPresent));
+ continue;
+ case Fortran::lower::LowerIntrinsicArgAs::Addr:
+ operands.emplace_back(
+ genOptionalAddr(builder, loc, optional, isPresent));
+ continue;
+ case Fortran::lower::LowerIntrinsicArgAs::Box:
+ operands.emplace_back(
+ genOptionalBox(builder, loc, optional, isPresent));
+ continue;
+ case Fortran::lower::LowerIntrinsicArgAs::Inquired:
+ operands.emplace_back(optional);
+ continue;
+ }
+ llvm_unreachable("bad switch");
+ }
+ switch (argRules.lowerAs) {
+ case Fortran::lower::LowerIntrinsicArgAs::Value:
+ operands.emplace_back(genval(*expr));
+ continue;
+ case Fortran::lower::LowerIntrinsicArgAs::Addr:
+ operands.emplace_back(gen(*expr));
+ continue;
+ case Fortran::lower::LowerIntrinsicArgAs::Box:
+ operands.emplace_back(lowerIntrinsicArgumentAsBox(*expr));
+ continue;
+ case Fortran::lower::LowerIntrinsicArgAs::Inquired:
+ operands.emplace_back(lowerIntrinsicArgumentAsInquired(*expr));
+ continue;
+ }
+ llvm_unreachable("bad switch");
+ }
+    // Let the intrinsic library lower the intrinsic procedure call.
+ return Fortran::lower::genIntrinsicCall(builder, getLoc(), name, resultType,
+ operands, stmtCtx);
+ }
+
+ template <typename A>
+ bool isCharacterType(const A &exp) {
+ if (auto type = exp.GetType())
+ return type->category() == Fortran::common::TypeCategory::Character;
+ return false;
+ }
+
/// helper to detect statement functions
static bool
isStatementFunctionCall(const Fortran::evaluate::ProcedureRef &procRef) {
// variable could also be modified by other means during the call.
if (!isParenthesizedVariable(expr))
return genExtAddr(expr);
- mlir::Location loc = getLoc();
if (expr.Rank() > 0)
- TODO(loc, "genTempExtAddr array");
+ return asArray(expr);
+ mlir::Location loc = getLoc();
return genExtValue(expr).match(
[&](const fir::CharBoxValue &boxChar) -> ExtValue {
- TODO(loc, "genTempExtAddr CharBoxValue");
+ return fir::factory::CharacterExprHelper{builder, loc}.createTempFrom(
+ boxChar);
},
[&](const fir::UnboxedValue &v) -> ExtValue {
mlir::Type type = v.getType();
return genProcedureRef(procRef, resTy);
}
- /// Helper to lower intrinsic arguments for inquiry intrinsic.
- ExtValue
- lowerIntrinsicArgumentAsInquired(const Fortran::lower::SomeExpr &expr) {
- if (Fortran::evaluate::IsAllocatableOrPointerObject(
- expr, converter.getFoldingContext()))
- return genMutableBoxValue(expr);
- return gen(expr);
+ template <typename A>
+ bool isScalar(const A &x) {
+ return x.Rank() == 0;
}
- /// Helper to lower intrinsic arguments to a fir::BoxValue.
- /// It preserves all the non default lower bounds/non deferred length
- /// parameter information.
- ExtValue lowerIntrinsicArgumentAsBox(const Fortran::lower::SomeExpr &expr) {
- mlir::Location loc = getLoc();
- ExtValue exv = genBoxArg(expr);
- mlir::Value box = builder.createBox(loc, exv);
- return fir::BoxValue(
- box, fir::factory::getNonDefaultLowerBounds(builder, loc, exv),
- fir::factory::getNonDeferredLengthParams(exv));
+ /// Helper to detect Transformational function reference.
+ template <typename T>
+ bool isTransformationalRef(const T &) {
+ return false;
+ }
+ template <typename T>
+ bool isTransformationalRef(const Fortran::evaluate::FunctionRef<T> &funcRef) {
+ return !funcRef.IsElemental() && funcRef.Rank();
+ }
+ template <typename T>
+ bool isTransformationalRef(Fortran::evaluate::Expr<T> expr) {
+ return std::visit([&](const auto &e) { return isTransformationalRef(e); },
+ expr.u);
}
- /// Generate a call to an intrinsic function.
- ExtValue
- genIntrinsicRef(const Fortran::evaluate::ProcedureRef &procRef,
- const Fortran::evaluate::SpecificIntrinsic &intrinsic,
- llvm::Optional<mlir::Type> resultType) {
- llvm::SmallVector<ExtValue> operands;
-
- llvm::StringRef name = intrinsic.name;
- mlir::Location loc = getLoc();
- if (Fortran::lower::intrinsicRequiresCustomOptionalHandling(
- procRef, intrinsic, converter)) {
- using ExvAndPresence = std::pair<ExtValue, llvm::Optional<mlir::Value>>;
- llvm::SmallVector<ExvAndPresence, 4> operands;
- auto prepareOptionalArg = [&](const Fortran::lower::SomeExpr &expr) {
- ExtValue optionalArg = lowerIntrinsicArgumentAsInquired(expr);
- mlir::Value isPresent =
- genActualIsPresentTest(builder, loc, optionalArg);
- operands.emplace_back(optionalArg, isPresent);
- };
- auto prepareOtherArg = [&](const Fortran::lower::SomeExpr &expr) {
- operands.emplace_back(genval(expr), llvm::None);
- };
- Fortran::lower::prepareCustomIntrinsicArgument(
- procRef, intrinsic, resultType, prepareOptionalArg, prepareOtherArg,
- converter);
-
- auto getArgument = [&](std::size_t i) -> ExtValue {
- if (fir::conformsWithPassByRef(
- fir::getBase(operands[i].first).getType()))
- return genLoad(operands[i].first);
- return operands[i].first;
- };
- auto isPresent = [&](std::size_t i) -> llvm::Optional<mlir::Value> {
- return operands[i].second;
- };
- return Fortran::lower::lowerCustomIntrinsic(
- builder, loc, name, resultType, isPresent, getArgument,
- operands.size(), stmtCtx);
- }
-
- const Fortran::lower::IntrinsicArgumentLoweringRules *argLowering =
- Fortran::lower::getIntrinsicArgumentLowering(name);
- for (const auto &[arg, dummy] :
- llvm::zip(procRef.arguments(),
- intrinsic.characteristics.value().dummyArguments)) {
- auto *expr = Fortran::evaluate::UnwrapExpr<Fortran::lower::SomeExpr>(arg);
- if (!expr) {
- // Absent optional.
- operands.emplace_back(Fortran::lower::getAbsentIntrinsicArgument());
- continue;
- }
- if (!argLowering) {
- // No argument lowering instruction, lower by value.
- operands.emplace_back(genval(*expr));
- continue;
- }
- // Ad-hoc argument lowering handling.
- Fortran::lower::ArgLoweringRule argRules =
- Fortran::lower::lowerIntrinsicArgumentAs(loc, *argLowering,
- dummy.name);
- if (argRules.handleDynamicOptional &&
- Fortran::evaluate::MayBePassedAsAbsentOptional(
- *expr, converter.getFoldingContext())) {
- ExtValue optional = lowerIntrinsicArgumentAsInquired(*expr);
- mlir::Value isPresent = genActualIsPresentTest(builder, loc, optional);
- switch (argRules.lowerAs) {
- case Fortran::lower::LowerIntrinsicArgAs::Value:
- operands.emplace_back(
- genOptionalValue(builder, loc, optional, isPresent));
- continue;
- case Fortran::lower::LowerIntrinsicArgAs::Addr:
- operands.emplace_back(
- genOptionalAddr(builder, loc, optional, isPresent));
- continue;
- case Fortran::lower::LowerIntrinsicArgAs::Box:
- operands.emplace_back(
- genOptionalBox(builder, loc, optional, isPresent));
- continue;
- case Fortran::lower::LowerIntrinsicArgAs::Inquired:
- operands.emplace_back(optional);
- continue;
- }
- llvm_unreachable("bad switch");
- }
- switch (argRules.lowerAs) {
- case Fortran::lower::LowerIntrinsicArgAs::Value:
- operands.emplace_back(genval(*expr));
- continue;
- case Fortran::lower::LowerIntrinsicArgAs::Addr:
- operands.emplace_back(gen(*expr));
- continue;
- case Fortran::lower::LowerIntrinsicArgAs::Box:
- operands.emplace_back(lowerIntrinsicArgumentAsBox(*expr));
- continue;
- case Fortran::lower::LowerIntrinsicArgAs::Inquired:
- operands.emplace_back(lowerIntrinsicArgumentAsInquired(*expr));
- continue;
- }
- llvm_unreachable("bad switch");
- }
- // Let the intrinsic library lower the intrinsic procedure call
- return Fortran::lower::genIntrinsicCall(builder, getLoc(), name, resultType,
- operands, stmtCtx);
- }
-
- template <typename A>
- bool isScalar(const A &x) {
- return x.Rank() == 0;
- }
-
- /// Helper to detect Transformational function reference.
- template <typename T>
- bool isTransformationalRef(const T &) {
- return false;
- }
- template <typename T>
- bool isTransformationalRef(const Fortran::evaluate::FunctionRef<T> &funcRef) {
- return !funcRef.IsElemental() && funcRef.Rank();
- }
- template <typename T>
- bool isTransformationalRef(Fortran::evaluate::Expr<T> expr) {
- return std::visit([&](const auto &e) { return isTransformationalRef(e); },
- expr.u);
- }
-
- template <typename A>
- ExtValue asArray(const A &x) {
- return Fortran::lower::createSomeArrayTempValue(converter, toEvExpr(x),
- symMap, stmtCtx);
- }
+ template <typename A>
+ ExtValue asArray(const A &x) {
+ return Fortran::lower::createSomeArrayTempValue(converter, toEvExpr(x),
+ symMap, stmtCtx);
+ }
/// Lower an array value as an argument. This argument can be passed as a box
/// value, so it may be possible to avoid making a temporary.
return isAdjustedArrayElementType(ty.getEleTy());
return false;
}
-
-/// Build an ExtendedValue from a fir.array<?x...?xT> without actually setting
-/// the actual extents and lengths. This is only to allow their propagation as
-/// ExtendedValue without triggering verifier failures when propagating
-/// character/arrays as unboxed values. Only the base of the resulting
-/// ExtendedValue should be used, it is undefined to use the length or extents
-/// of the extended value returned,
-inline static fir::ExtendedValue
-convertToArrayBoxValue(mlir::Location loc, fir::FirOpBuilder &builder,
- mlir::Value val, mlir::Value len) {
- mlir::Type ty = fir::unwrapRefType(val.getType());
- mlir::IndexType idxTy = builder.getIndexType();
- auto seqTy = ty.cast<fir::SequenceType>();
- auto undef = builder.create<fir::UndefOp>(loc, idxTy);
- llvm::SmallVector<mlir::Value> extents(seqTy.getDimension(), undef);
- if (fir::isa_char(seqTy.getEleTy()))
- return fir::CharArrayBoxValue(val, len ? len : undef, extents);
- return fir::ArrayBoxValue(val, extents);
+static mlir::Type adjustedArrayElementType(mlir::Type t) {
+ return isAdjustedArrayElementType(t) ? fir::ReferenceType::get(t) : t;
}
/// Helper to generate calls to scalar user defined assignment procedures.
return amend;
}
+/// Build an ExtendedValue from a fir.array<?x...?xT> without actually setting
+/// the actual extents and lengths. This is only to allow their propagation as
+/// ExtendedValue without triggering verifier failures when propagating
+/// character/arrays as unboxed values. Only the base of the resulting
+/// ExtendedValue should be used, it is undefined to use the length or extents
+/// of the extended value returned.
+inline static fir::ExtendedValue
+convertToArrayBoxValue(mlir::Location loc, fir::FirOpBuilder &builder,
+ mlir::Value val, mlir::Value len) {
+ mlir::Type ty = fir::unwrapRefType(val.getType());
+ mlir::IndexType idxTy = builder.getIndexType();
+ auto seqTy = ty.cast<fir::SequenceType>();
+ auto undef = builder.create<fir::UndefOp>(loc, idxTy);
+ llvm::SmallVector<mlir::Value> extents(seqTy.getDimension(), undef);
+ if (fir::isa_char(seqTy.getEleTy()))
+ return fir::CharArrayBoxValue(val, len ? len : undef, extents);
+ return fir::ArrayBoxValue(val, extents);
+}
+
//===----------------------------------------------------------------------===//
//
// Lowering of array expressions.
return lexv;
}
- bool explicitSpaceIsActive() const {
- return explicitSpace && explicitSpace->isActive();
+private:
+ void determineShapeOfDest(const fir::ExtendedValue &lhs) {
+ destShape = fir::factory::getExtents(builder, getLoc(), lhs);
}
- bool implicitSpaceHasMasks() const {
- return implicitSpace && !implicitSpace->empty();
+ void determineShapeOfDest(const Fortran::lower::SomeExpr &lhs) {
+ if (!destShape.empty())
+ return;
+ if (explicitSpaceIsActive() && determineShapeWithSlice(lhs))
+ return;
+ mlir::Type idxTy = builder.getIndexType();
+ mlir::Location loc = getLoc();
+ if (std::optional<Fortran::evaluate::ConstantSubscripts> constantShape =
+ Fortran::evaluate::GetConstantExtents(converter.getFoldingContext(),
+ lhs))
+ for (Fortran::common::ConstantSubscript extent : *constantShape)
+ destShape.push_back(builder.createIntegerConstant(loc, idxTy, extent));
}
- CC genMaskAccess(mlir::Value tmp, mlir::Value shape) {
+ bool genShapeFromDataRef(const Fortran::semantics::Symbol &x) {
+ return false;
+ }
+ bool genShapeFromDataRef(const Fortran::evaluate::CoarrayRef &) {
+ TODO(getLoc(), "coarray ref");
+ return false;
+ }
+ bool genShapeFromDataRef(const Fortran::evaluate::Component &x) {
+ return x.base().Rank() > 0 ? genShapeFromDataRef(x.base()) : false;
+ }
+ bool genShapeFromDataRef(const Fortran::evaluate::ArrayRef &x) {
+ if (x.Rank() == 0)
+ return false;
+ if (x.base().Rank() > 0)
+ if (genShapeFromDataRef(x.base()))
+ return true;
+ // x has rank and x.base did not produce a shape.
+ ExtValue exv = x.base().IsSymbol() ? asScalarRef(getFirstSym(x.base()))
+ : asScalarRef(x.base().GetComponent());
mlir::Location loc = getLoc();
- return [=, builder = &converter.getFirOpBuilder()](IterSpace iters) {
- mlir::Type arrTy = fir::dyn_cast_ptrOrBoxEleTy(tmp.getType());
- auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
- mlir::Type eleRefTy = builder->getRefType(eleTy);
- mlir::IntegerType i1Ty = builder->getI1Type();
- // Adjust indices for any shift of the origin of the array.
- llvm::SmallVector<mlir::Value> indices = fir::factory::originateIndices(
- loc, *builder, tmp.getType(), shape, iters.iterVec());
- auto addr = builder->create<fir::ArrayCoorOp>(
- loc, eleRefTy, tmp, shape, /*slice=*/mlir::Value{}, indices,
- /*typeParams=*/llvm::None);
- auto load = builder->create<fir::LoadOp>(loc, addr);
- return builder->createConvert(loc, i1Ty, load);
- };
+ mlir::IndexType idxTy = builder.getIndexType();
+ llvm::SmallVector<mlir::Value> definedShape =
+ fir::factory::getExtents(builder, loc, exv);
+ mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
+ for (auto ss : llvm::enumerate(x.subscript())) {
+ std::visit(Fortran::common::visitors{
+ [&](const Fortran::evaluate::Triplet &trip) {
+ // For a subscript of triple notation, we compute the
+ // range of this dimension of the iteration space.
+ auto lo = [&]() {
+ if (auto optLo = trip.lower())
+ return fir::getBase(asScalar(*optLo));
+ return getLBound(exv, ss.index(), one);
+ }();
+ auto hi = [&]() {
+ if (auto optHi = trip.upper())
+ return fir::getBase(asScalar(*optHi));
+ return getUBound(exv, ss.index(), one);
+ }();
+ auto step = builder.createConvert(
+ loc, idxTy, fir::getBase(asScalar(trip.stride())));
+ auto extent = builder.genExtentFromTriplet(loc, lo, hi,
+ step, idxTy);
+ destShape.push_back(extent);
+ },
+ [&](auto) {}},
+ ss.value().u);
+ }
+ return true;
+ }
+ bool genShapeFromDataRef(const Fortran::evaluate::NamedEntity &x) {
+ if (x.IsSymbol())
+ return genShapeFromDataRef(getFirstSym(x));
+ return genShapeFromDataRef(x.GetComponent());
+ }
+ bool genShapeFromDataRef(const Fortran::evaluate::DataRef &x) {
+ return std::visit([&](const auto &v) { return genShapeFromDataRef(v); },
+ x.u);
}
- /// Construct the incremental instantiations of the ragged array structure.
- /// Rebind the lazy buffer variable, etc. as we go.
- template <bool withAllocation = false>
- mlir::Value prepareRaggedArrays(Fortran::lower::FrontEndExpr expr) {
- assert(explicitSpaceIsActive());
- mlir::Location loc = getLoc();
- mlir::TupleType raggedTy = fir::factory::getRaggedArrayHeaderType(builder);
- llvm::SmallVector<llvm::SmallVector<fir::DoLoopOp>> loopStack =
- explicitSpace->getLoopStack();
- const std::size_t depth = loopStack.size();
- mlir::IntegerType i64Ty = builder.getIntegerType(64);
- [[maybe_unused]] mlir::Value byteSize =
- builder.createIntegerConstant(loc, i64Ty, 1);
- mlir::Value header = implicitSpace->lookupMaskHeader(expr);
- for (std::remove_const_t<decltype(depth)> i = 0; i < depth; ++i) {
- auto insPt = builder.saveInsertionPoint();
- if (i < depth - 1)
- builder.setInsertionPoint(loopStack[i + 1][0]);
+ /// When in an explicit space, the ranked component must be evaluated to
+ /// determine the actual number of iterations when slicing triples are
+ /// present. Lower these expressions here.
+ bool determineShapeWithSlice(const Fortran::lower::SomeExpr &lhs) {
+ LLVM_DEBUG(Fortran::lower::DumpEvaluateExpr::dump(
+ llvm::dbgs() << "determine shape of:\n", lhs));
+ // FIXME: We may not want to use ExtractDataRef here since it doesn't deal
+ // with substrings, etc.
+ std::optional<Fortran::evaluate::DataRef> dref =
+ Fortran::evaluate::ExtractDataRef(lhs);
+ return dref.has_value() ? genShapeFromDataRef(*dref) : false;
+ }
- // Compute and gather the extents.
- llvm::SmallVector<mlir::Value> extents;
- for (auto doLoop : loopStack[i])
- extents.push_back(builder.genExtentFromTriplet(
- loc, doLoop.getLowerBound(), doLoop.getUpperBound(),
- doLoop.getStep(), i64Ty));
- if constexpr (withAllocation) {
- fir::runtime::genRaggedArrayAllocate(
- loc, builder, header, /*asHeader=*/true, byteSize, extents);
- }
+ /// CHARACTER and derived type elements are treated as memory references. The
+ /// numeric types are treated as values.
+ static mlir::Type adjustedArraySubtype(mlir::Type ty,
+ mlir::ValueRange indices) {
+ mlir::Type pathTy = fir::applyPathToType(ty, indices);
+ assert(pathTy && "indices failed to apply to type");
+ return adjustedArrayElementType(pathTy);
+ }
- // Compute the dynamic position into the header.
+ ExtValue lowerArrayExpression(const Fortran::lower::SomeExpr &exp) {
+ mlir::Type resTy = converter.genType(exp);
+ return std::visit(
+ [&](const auto &e) { return lowerArrayExpression(genarr(e), resTy); },
+ exp.u);
+ }
+ ExtValue lowerArrayExpression(const ExtValue &exv) {
+ assert(!explicitSpace);
+ mlir::Type resTy = fir::unwrapPassByRefType(fir::getBase(exv).getType());
+ return lowerArrayExpression(genarr(exv), resTy);
+ }
+
+ void populateBounds(llvm::SmallVectorImpl<mlir::Value> &bounds,
+ const Fortran::evaluate::Substring *substring) {
+ if (!substring)
+ return;
+ bounds.push_back(fir::getBase(asScalar(substring->lower())));
+ if (auto upper = substring->upper())
+ bounds.push_back(fir::getBase(asScalar(*upper)));
+ }
+
+ /// Default store to destination implementation.
+ /// This implements the default case, which is to assign the value in
+ /// `iters.element` into the destination array, `iters.innerArgument`. Handles
+ /// by value and by reference assignment.
+ CC defaultStoreToDestination(const Fortran::evaluate::Substring *substring) {
+ return [=](IterSpace iterSpace) -> ExtValue {
+ mlir::Location loc = getLoc();
+ mlir::Value innerArg = iterSpace.innerArgument();
+ fir::ExtendedValue exv = iterSpace.elementExv();
+ mlir::Type arrTy = innerArg.getType();
+ mlir::Type eleTy = fir::applyPathToType(arrTy, iterSpace.iterVec());
+ if (isAdjustedArrayElementType(eleTy)) {
+ // The elemental update is in the memref domain. Under this semantics,
+ // we must always copy the computed new element from its location in
+ // memory into the destination array.
+ mlir::Type resRefTy = builder.getRefType(eleTy);
+ // Get a reference to the array element to be amended.
+ auto arrayOp = builder.create<fir::ArrayAccessOp>(
+ loc, resRefTy, innerArg, iterSpace.iterVec(),
+ destination.getTypeparams());
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
+ llvm::SmallVector<mlir::Value> substringBounds;
+ populateBounds(substringBounds, substring);
+ mlir::Value dstLen = fir::factory::genLenOfCharacter(
+ builder, loc, destination, iterSpace.iterVec(), substringBounds);
+ fir::ArrayAmendOp amend = createCharArrayAmend(
+ loc, builder, arrayOp, dstLen, exv, innerArg, substringBounds);
+ return abstractArrayExtValue(amend, dstLen);
+ }
+ if (fir::isa_derived(eleTy)) {
+ fir::ArrayAmendOp amend = createDerivedArrayAmend(
+ loc, destination, builder, arrayOp, exv, eleTy, innerArg);
+ return abstractArrayExtValue(amend /*FIXME: typeparams?*/);
+ }
+ assert(eleTy.isa<fir::SequenceType>() && "must be an array");
+ TODO(loc, "array (as element) assignment");
+ }
+ // By value semantics. The element is being assigned by value.
+ mlir::Value ele = builder.createConvert(loc, eleTy, fir::getBase(exv));
+ auto update = builder.create<fir::ArrayUpdateOp>(
+ loc, arrTy, innerArg, ele, iterSpace.iterVec(),
+ destination.getTypeparams());
+ return abstractArrayExtValue(update);
+ };
+ }
+
+ /// For an elemental array expression.
+ /// 1. Lower the scalars and array loads.
+ /// 2. Create the iteration space.
+ /// 3. Create the element-by-element computation in the loop.
+ /// 4. Return the resulting array value.
+ /// If no destination was set in the array context, a temporary of
+ /// \p resultTy will be created to hold the evaluated expression.
+ /// Otherwise, \p resultTy is ignored and the expression is evaluated
+ /// in the destination. \p f is a continuation built from an
+ /// evaluate::Expr or an ExtendedValue.
+ ExtValue lowerArrayExpression(CC f, mlir::Type resultTy) {
+ mlir::Location loc = getLoc();
+ auto [iterSpace, insPt] = genIterSpace(resultTy);
+ auto exv = f(iterSpace);
+ iterSpace.setElement(std::move(exv));
+ auto lambda = ccStoreToDest.hasValue()
+ ? ccStoreToDest.getValue()
+ : defaultStoreToDestination(/*substring=*/nullptr);
+ mlir::Value updVal = fir::getBase(lambda(iterSpace));
+ finalizeElementCtx();
+ builder.create<fir::ResultOp>(loc, updVal);
+ builder.restoreInsertionPoint(insPt);
+ return abstractArrayExtValue(iterSpace.outerResult());
+ }
+
+ /// Compute the shape of a slice.
+ llvm::SmallVector<mlir::Value> computeSliceShape(mlir::Value slice) {
+ llvm::SmallVector<mlir::Value> slicedShape;
+ auto slOp = mlir::cast<fir::SliceOp>(slice.getDefiningOp());
+ mlir::Operation::operand_range triples = slOp.getTriples();
+ mlir::IndexType idxTy = builder.getIndexType();
+ mlir::Location loc = getLoc();
+ for (unsigned i = 0, end = triples.size(); i < end; i += 3) {
+ if (!mlir::isa_and_nonnull<fir::UndefOp>(
+ triples[i + 1].getDefiningOp())) {
+ // (..., lb:ub:step, ...) case: extent = max((ub-lb+step)/step, 0)
+ // See Fortran 2018 9.5.3.3.2 section for more details.
+ mlir::Value res = builder.genExtentFromTriplet(
+ loc, triples[i], triples[i + 1], triples[i + 2], idxTy);
+ slicedShape.emplace_back(res);
+ } else {
+ // do nothing. `..., i, ...` case, so dimension is dropped.
+ }
+ }
+ return slicedShape;
+ }
+
+ /// Get the shape from an ArrayOperand. The shape of the array is adjusted if
+ /// the array was sliced.
+ llvm::SmallVector<mlir::Value> getShape(ArrayOperand array) {
+ if (array.slice)
+ return computeSliceShape(array.slice);
+ if (array.memref.getType().isa<fir::BoxType>())
+ return fir::factory::readExtents(builder, getLoc(),
+ fir::BoxValue{array.memref});
+ std::vector<mlir::Value, std::allocator<mlir::Value>> extents =
+ fir::factory::getExtents(array.shape);
+ return {extents.begin(), extents.end()};
+ }
+
+ /// Get the shape from an ArrayLoad.
+ llvm::SmallVector<mlir::Value> getShape(fir::ArrayLoadOp arrayLoad) {
+ return getShape(ArrayOperand{arrayLoad.getMemref(), arrayLoad.getShape(),
+ arrayLoad.getSlice()});
+ }
+
+ /// Returns the first array operand that may not be absent. If all
+ /// array operands may be absent, return the first one.
+ const ArrayOperand &getInducingShapeArrayOperand() const {
+ assert(!arrayOperands.empty());
+ for (const ArrayOperand &op : arrayOperands)
+ if (!op.mayBeAbsent)
+ return op;
+      // If all array operands appear in an optional position, then none of
+      // them is allowed to be absent as per 15.5.2.12 point 3. (6). Just pick
+      // the first operand.
+ // TODO: There is an opportunity to add a runtime check here that
+ // this array is present as required.
+ return arrayOperands[0];
+ }
+
+ /// Generate the shape of the iteration space over the array expression. The
+  /// iteration space may be implicit, explicit, or both. If it is implied, it is
+ /// based on the destination and operand array loads, or an optional
+ /// Fortran::evaluate::Shape from the front end. If the shape is explicit,
+ /// this returns any implicit shape component, if it exists.
+ llvm::SmallVector<mlir::Value> genIterationShape() {
+ // Use the precomputed destination shape.
+ if (!destShape.empty())
+ return destShape;
+ // Otherwise, use the destination's shape.
+ if (destination)
+ return getShape(destination);
+ // Otherwise, use the first ArrayLoad operand shape.
+ if (!arrayOperands.empty())
+ return getShape(getInducingShapeArrayOperand());
+ fir::emitFatalError(getLoc(),
+ "failed to compute the array expression shape");
+ }
+
+ bool explicitSpaceIsActive() const {
+ return explicitSpace && explicitSpace->isActive();
+ }
+
+ bool implicitSpaceHasMasks() const {
+ return implicitSpace && !implicitSpace->empty();
+ }
+
+ CC genMaskAccess(mlir::Value tmp, mlir::Value shape) {
+ mlir::Location loc = getLoc();
+ return [=, builder = &converter.getFirOpBuilder()](IterSpace iters) {
+ mlir::Type arrTy = fir::dyn_cast_ptrOrBoxEleTy(tmp.getType());
+ auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
+ mlir::Type eleRefTy = builder->getRefType(eleTy);
+ mlir::IntegerType i1Ty = builder->getI1Type();
+ // Adjust indices for any shift of the origin of the array.
+ llvm::SmallVector<mlir::Value> indices = fir::factory::originateIndices(
+ loc, *builder, tmp.getType(), shape, iters.iterVec());
+ auto addr = builder->create<fir::ArrayCoorOp>(
+ loc, eleRefTy, tmp, shape, /*slice=*/mlir::Value{}, indices,
+ /*typeParams=*/llvm::None);
+ auto load = builder->create<fir::LoadOp>(loc, addr);
+ return builder->createConvert(loc, i1Ty, load);
+ };
+ }
+
+ /// Construct the incremental instantiations of the ragged array structure.
+ /// Rebind the lazy buffer variable, etc. as we go.
+ template <bool withAllocation = false>
+ mlir::Value prepareRaggedArrays(Fortran::lower::FrontEndExpr expr) {
+ assert(explicitSpaceIsActive());
+ mlir::Location loc = getLoc();
+ mlir::TupleType raggedTy = fir::factory::getRaggedArrayHeaderType(builder);
+ llvm::SmallVector<llvm::SmallVector<fir::DoLoopOp>> loopStack =
+ explicitSpace->getLoopStack();
+ const std::size_t depth = loopStack.size();
+ mlir::IntegerType i64Ty = builder.getIntegerType(64);
+ [[maybe_unused]] mlir::Value byteSize =
+ builder.createIntegerConstant(loc, i64Ty, 1);
+ mlir::Value header = implicitSpace->lookupMaskHeader(expr);
+ for (std::remove_const_t<decltype(depth)> i = 0; i < depth; ++i) {
+ auto insPt = builder.saveInsertionPoint();
+ if (i < depth - 1)
+ builder.setInsertionPoint(loopStack[i + 1][0]);
+
+ // Compute and gather the extents.
+ llvm::SmallVector<mlir::Value> extents;
+ for (auto doLoop : loopStack[i])
+ extents.push_back(builder.genExtentFromTriplet(
+ loc, doLoop.getLowerBound(), doLoop.getUpperBound(),
+ doLoop.getStep(), i64Ty));
+ if constexpr (withAllocation) {
+ fir::runtime::genRaggedArrayAllocate(
+ loc, builder, header, /*asHeader=*/true, byteSize, extents);
+ }
+
+ // Compute the dynamic position into the header.
llvm::SmallVector<mlir::Value> offsets;
for (auto doLoop : loopStack[i]) {
auto m = builder.create<mlir::arith::SubIOp>(
builder.create<fir::ResultOp>(loc, innerArg);
builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
};
- for (std::size_t i = 0; i < size; ++i)
+ for (std::remove_const_t<decltype(size)> i = 0; i < size; ++i)
if (const auto *e = maskExprs[i])
genFalseBlock(e, genCond(e, iters));
.lowerIntrinsicArgumentAsInquired(x);
}
- // An expression with non-zero rank is an array expression.
- template <typename A>
- bool isArray(const A &x) const {
- return x.Rank() != 0;
- }
-
/// Some temporaries are allocated on an element-by-element basis during the
/// array expression evaluation. Collect the cleanups here so the resources
/// can be freed before the next loop iteration, avoiding memory leaks. etc.
procRef, retTy));
}
+ CC genarr(const Fortran::evaluate::ProcedureDesignator &) {
+ TODO(getLoc(), "procedure designator");
+ }
+ CC genarr(const Fortran::evaluate::ProcedureRef &x) {
+ if (x.hasAlternateReturns())
+ fir::emitFatalError(getLoc(),
+ "array procedure reference with alt-return");
+ return genProcRef(x, llvm::None);
+ }
template <typename A>
CC genScalarAndForwardValue(const A &x) {
ExtValue result = asScalar(x);
return [=](IterSpace) { return result; };
}
-
template <typename A, typename = std::enable_if_t<Fortran::common::HasMember<
A, Fortran::evaluate::TypelessExpression>>>
CC genarr(const A &x) {
template <int KIND>
CC genarr(const Fortran::evaluate::ComplexComponent<KIND> &x) {
- TODO(getLoc(), "ComplexComponent<KIND>");
+ mlir::Location loc = getLoc();
+ auto lambda = genarr(x.left());
+ bool isImagPart = x.isImaginaryPart;
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value lhs = fir::getBase(lambda(iters));
+ return fir::factory::Complex{builder, loc}.extractComplexPart(lhs,
+ isImagPart);
+ };
}
template <typename T>
template <Fortran::common::TypeCategory TC, int KIND>
CC genarr(
const Fortran::evaluate::Extremum<Fortran::evaluate::Type<TC, KIND>> &x) {
- TODO(getLoc(), "genarr Extremum<Fortran::evaluate::Type<TC, KIND>>");
+ mlir::Location loc = getLoc();
+ auto lf = genarr(x.left());
+ auto rf = genarr(x.right());
+ switch (x.ordering) {
+ case Fortran::evaluate::Ordering::Greater:
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value lhs = fir::getBase(lf(iters));
+ mlir::Value rhs = fir::getBase(rf(iters));
+ return Fortran::lower::genMax(builder, loc,
+ llvm::ArrayRef<mlir::Value>{lhs, rhs});
+ };
+ case Fortran::evaluate::Ordering::Less:
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value lhs = fir::getBase(lf(iters));
+ mlir::Value rhs = fir::getBase(rf(iters));
+ return Fortran::lower::genMin(builder, loc,
+ llvm::ArrayRef<mlir::Value>{lhs, rhs});
+ };
+ case Fortran::evaluate::Ordering::Equal:
+ llvm_unreachable("Equal is not a valid ordering in this context");
+ }
+ llvm_unreachable("unknown ordering");
}
template <Fortran::common::TypeCategory TC, int KIND>
CC genarr(
const Fortran::evaluate::RealToIntPower<Fortran::evaluate::Type<TC, KIND>>
&x) {
- TODO(getLoc(), "genarr RealToIntPower<Fortran::evaluate::Type<TC, KIND>>");
+ mlir::Location loc = getLoc();
+ auto ty = converter.genType(TC, KIND);
+ auto lf = genarr(x.left());
+ auto rf = genarr(x.right());
+ return [=](IterSpace iters) {
+ mlir::Value lhs = fir::getBase(lf(iters));
+ mlir::Value rhs = fir::getBase(rf(iters));
+ return Fortran::lower::genPow(builder, loc, ty, lhs, rhs);
+ };
}
template <int KIND>
CC genarr(const Fortran::evaluate::ComplexConstructor<KIND> &x) {
- TODO(getLoc(), "genarr ComplexConstructor<KIND>");
- }
-
- /// Fortran's concatenation operator `//`.
- template <int KIND>
- CC genarr(const Fortran::evaluate::Concat<KIND> &x) {
mlir::Location loc = getLoc();
auto lf = genarr(x.left());
auto rf = genarr(x.right());
return [=](IterSpace iters) -> ExtValue {
- auto lhs = lf(iters);
+ mlir::Value lhs = fir::getBase(lf(iters));
+ mlir::Value rhs = fir::getBase(rf(iters));
+ return fir::factory::Complex{builder, loc}.createComplex(KIND, lhs, rhs);
+ };
+ }
+
+ /// Fortran's concatenation operator `//`.
+ template <int KIND>
+ CC genarr(const Fortran::evaluate::Concat<KIND> &x) {
+ mlir::Location loc = getLoc();
+ auto lf = genarr(x.left());
+ auto rf = genarr(x.right());
+ return [=](IterSpace iters) -> ExtValue {
+ auto lhs = lf(iters);
auto rhs = rf(iters);
const fir::CharBoxValue *lchr = lhs.getCharBox();
const fir::CharBoxValue *rchr = rhs.getCharBox();
template <typename A>
ExtValue genArrayBase(const A &base) {
ScalarExprLowering sel{getLoc(), converter, symMap, stmtCtx};
- return base.IsSymbol() ? sel.gen(base.GetFirstSymbol())
+ return base.IsSymbol() ? sel.gen(getFirstSym(base))
: sel.gen(base.GetComponent());
}
trips.clear();
}
+  /// If \p ty is a fir.box type, return the type it wraps with any reference
+  /// indirection stripped; otherwise return \p ty unchanged.
+  static mlir::Type unwrapBoxEleTy(mlir::Type ty) {
+    if (auto boxTy = ty.dyn_cast<fir::BoxType>())
+      return fir::unwrapRefType(boxTy.getEleTy());
+    return ty;
+  }
+
+  /// Return the extents of the array type \p ty (looking through a possible
+  /// fir.box wrapper) as index-typed SSA values. Extents that are unknown at
+  /// compile time are represented by fir.undef placeholder values.
+  llvm::SmallVector<mlir::Value> getShape(mlir::Type ty) {
+    llvm::SmallVector<mlir::Value> result;
+    ty = unwrapBoxEleTy(ty);
+    mlir::Location loc = getLoc();
+    mlir::IndexType idxTy = builder.getIndexType();
+    for (auto extent : ty.cast<fir::SequenceType>().getShape()) {
+      // A dynamic extent has no constant value; materialize an undef so the
+      // vector still has one entry per dimension.
+      auto v = extent == fir::SequenceType::getUnknownExtent()
+                   ? builder.create<fir::UndefOp>(loc, idxTy).getResult()
+                   : builder.createIntegerConstant(loc, idxTy, extent);
+      result.push_back(v);
+    }
+    return result;
+  }
+
CC genarr(const Fortran::semantics::SymbolRef &sym,
ComponentPath &components) {
return genarr(sym.get(), components);
return genarr(extMemref, dummy);
}
- //===--------------------------------------------------------------------===//
- // Array construction
- //===--------------------------------------------------------------------===//
-
- /// Target agnostic computation of the size of an element in the array.
- /// Returns the size in bytes with type `index` or a null Value if the element
- /// size is not constant.
- mlir::Value computeElementSize(const ExtValue &exv, mlir::Type eleTy,
- mlir::Type resTy) {
+  /// Base case of generating an array reference.
+ CC genarr(const ExtValue &extMemref, ComponentPath &components) {
mlir::Location loc = getLoc();
- mlir::IndexType idxTy = builder.getIndexType();
- mlir::Value multiplier = builder.createIntegerConstant(loc, idxTy, 1);
- if (fir::hasDynamicSize(eleTy)) {
- if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
- // Array of char with dynamic length parameter. Downcast to an array
- // of singleton char, and scale by the len type parameter from
- // `exv`.
- exv.match(
- [&](const fir::CharBoxValue &cb) { multiplier = cb.getLen(); },
- [&](const fir::CharArrayBoxValue &cb) { multiplier = cb.getLen(); },
- [&](const fir::BoxValue &box) {
- multiplier = fir::factory::CharacterExprHelper(builder, loc)
- .readLengthFromBox(box.getAddr());
- },
- [&](const fir::MutableBoxValue &box) {
- multiplier = fir::factory::CharacterExprHelper(builder, loc)
- .readLengthFromBox(box.getAddr());
- },
- [&](const auto &) {
- fir::emitFatalError(loc,
- "array constructor element has unknown size");
- });
- fir::CharacterType newEleTy = fir::CharacterType::getSingleton(
- eleTy.getContext(), charTy.getFKind());
- if (auto seqTy = resTy.dyn_cast<fir::SequenceType>()) {
- assert(eleTy == seqTy.getEleTy());
- resTy = fir::SequenceType::get(seqTy.getShape(), newEleTy);
+ mlir::Value memref = fir::getBase(extMemref);
+ mlir::Type arrTy = fir::dyn_cast_ptrOrBoxEleTy(memref.getType());
+ assert(arrTy.isa<fir::SequenceType>() && "memory ref must be an array");
+ mlir::Value shape = builder.createShape(loc, extMemref);
+ mlir::Value slice;
+ if (components.isSlice()) {
+ if (isBoxValue() && components.substring) {
+ // Append the substring operator to emboxing Op as it will become an
+ // interior adjustment (add offset, adjust LEN) to the CHARACTER value
+ // being referenced in the descriptor.
+ llvm::SmallVector<mlir::Value> substringBounds;
+ populateBounds(substringBounds, components.substring);
+ // Convert to (offset, size)
+ mlir::Type iTy = substringBounds[0].getType();
+ if (substringBounds.size() != 2) {
+ fir::CharacterType charTy =
+ fir::factory::CharacterExprHelper::getCharType(arrTy);
+ if (charTy.hasConstantLen()) {
+ mlir::IndexType idxTy = builder.getIndexType();
+ fir::CharacterType::LenType charLen = charTy.getLen();
+ mlir::Value lenValue =
+ builder.createIntegerConstant(loc, idxTy, charLen);
+ substringBounds.push_back(lenValue);
+ } else {
+ llvm::SmallVector<mlir::Value> typeparams =
+ fir::getTypeParams(extMemref);
+ substringBounds.push_back(typeparams.back());
+ }
}
- eleTy = newEleTy;
+ // Convert the lower bound to 0-based substring.
+ mlir::Value one =
+ builder.createIntegerConstant(loc, substringBounds[0].getType(), 1);
+ substringBounds[0] =
+ builder.create<mlir::arith::SubIOp>(loc, substringBounds[0], one);
+ // Convert the upper bound to a length.
+ mlir::Value cast = builder.createConvert(loc, iTy, substringBounds[1]);
+ mlir::Value zero = builder.createIntegerConstant(loc, iTy, 0);
+ auto size =
+ builder.create<mlir::arith::SubIOp>(loc, cast, substringBounds[0]);
+ auto cmp = builder.create<mlir::arith::CmpIOp>(
+ loc, mlir::arith::CmpIPredicate::sgt, size, zero);
+ // size = MAX(upper - (lower - 1), 0)
+ substringBounds[1] =
+ builder.create<mlir::arith::SelectOp>(loc, cmp, size, zero);
+ slice = builder.create<fir::SliceOp>(loc, components.trips,
+ components.suffixComponents,
+ substringBounds);
} else {
- TODO(loc, "dynamic sized type");
+ slice = builder.createSlice(loc, extMemref, components.trips,
+ components.suffixComponents);
}
- }
- mlir::Type eleRefTy = builder.getRefType(eleTy);
- mlir::Type resRefTy = builder.getRefType(resTy);
- mlir::Value nullPtr = builder.createNullConstant(loc, resRefTy);
- auto offset = builder.create<fir::CoordinateOp>(
- loc, eleRefTy, nullPtr, mlir::ValueRange{multiplier});
- return builder.createConvert(loc, idxTy, offset);
- }
-
- /// Get the function signature of the LLVM memcpy intrinsic.
- mlir::FunctionType memcpyType() {
- return fir::factory::getLlvmMemcpy(builder).getFunctionType();
- }
-
- /// Create a call to the LLVM memcpy intrinsic.
- void createCallMemcpy(llvm::ArrayRef<mlir::Value> args) {
- mlir::Location loc = getLoc();
- mlir::FuncOp memcpyFunc = fir::factory::getLlvmMemcpy(builder);
- mlir::SymbolRefAttr funcSymAttr =
- builder.getSymbolRefAttr(memcpyFunc.getName());
- mlir::FunctionType funcTy = memcpyFunc.getFunctionType();
- builder.create<fir::CallOp>(loc, funcTy.getResults(), funcSymAttr, args);
- }
-
- // Construct code to check for a buffer overrun and realloc the buffer when
- // space is depleted. This is done between each item in the ac-value-list.
- mlir::Value growBuffer(mlir::Value mem, mlir::Value needed,
- mlir::Value bufferSize, mlir::Value buffSize,
- mlir::Value eleSz) {
- mlir::Location loc = getLoc();
- mlir::FuncOp reallocFunc = fir::factory::getRealloc(builder);
- auto cond = builder.create<mlir::arith::CmpIOp>(
- loc, mlir::arith::CmpIPredicate::sle, bufferSize, needed);
- auto ifOp = builder.create<fir::IfOp>(loc, mem.getType(), cond,
- /*withElseRegion=*/true);
- auto insPt = builder.saveInsertionPoint();
- builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
- // Not enough space, resize the buffer.
- mlir::IndexType idxTy = builder.getIndexType();
- mlir::Value two = builder.createIntegerConstant(loc, idxTy, 2);
- auto newSz = builder.create<mlir::arith::MulIOp>(loc, needed, two);
- builder.create<fir::StoreOp>(loc, newSz, buffSize);
- mlir::Value byteSz = builder.create<mlir::arith::MulIOp>(loc, newSz, eleSz);
- mlir::SymbolRefAttr funcSymAttr =
- builder.getSymbolRefAttr(reallocFunc.getName());
- mlir::FunctionType funcTy = reallocFunc.getFunctionType();
- auto newMem = builder.create<fir::CallOp>(
- loc, funcTy.getResults(), funcSymAttr,
- llvm::ArrayRef<mlir::Value>{
- builder.createConvert(loc, funcTy.getInputs()[0], mem),
- builder.createConvert(loc, funcTy.getInputs()[1], byteSz)});
- mlir::Value castNewMem =
- builder.createConvert(loc, mem.getType(), newMem.getResult(0));
- builder.create<fir::ResultOp>(loc, castNewMem);
- builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
- // Otherwise, just forward the buffer.
- builder.create<fir::ResultOp>(loc, mem);
- builder.restoreInsertionPoint(insPt);
- return ifOp.getResult(0);
- }
-
- /// Copy the next value (or vector of values) into the array being
- /// constructed.
- mlir::Value copyNextArrayCtorSection(const ExtValue &exv, mlir::Value buffPos,
- mlir::Value buffSize, mlir::Value mem,
- mlir::Value eleSz, mlir::Type eleTy,
- mlir::Type eleRefTy, mlir::Type resTy) {
- mlir::Location loc = getLoc();
- auto off = builder.create<fir::LoadOp>(loc, buffPos);
- auto limit = builder.create<fir::LoadOp>(loc, buffSize);
- mlir::IndexType idxTy = builder.getIndexType();
- mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
-
- if (fir::isRecordWithAllocatableMember(eleTy))
- TODO(loc, "deep copy on allocatable members");
+ if (components.hasComponents()) {
+ auto seqTy = arrTy.cast<fir::SequenceType>();
+ mlir::Type eleTy =
+ fir::applyPathToType(seqTy.getEleTy(), components.suffixComponents);
+ if (!eleTy)
+ fir::emitFatalError(loc, "slicing path is ill-formed");
+ if (auto realTy = eleTy.dyn_cast<fir::RealType>())
+ eleTy = Fortran::lower::convertReal(realTy.getContext(),
+ realTy.getFKind());
- if (!eleSz) {
- // Compute the element size at runtime.
- assert(fir::hasDynamicSize(eleTy));
- if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
- auto charBytes =
- builder.getKindMap().getCharacterBitsize(charTy.getFKind()) / 8;
- mlir::Value bytes =
- builder.createIntegerConstant(loc, idxTy, charBytes);
- mlir::Value length = fir::getLen(exv);
- if (!length)
- fir::emitFatalError(loc, "result is not boxed character");
- eleSz = builder.create<mlir::arith::MulIOp>(loc, bytes, length);
- } else {
- TODO(loc, "PDT size");
- // Will call the PDT's size function with the type parameters.
+ // create the type of the projected array.
+ arrTy = fir::SequenceType::get(seqTy.getShape(), eleTy);
+ LLVM_DEBUG(llvm::dbgs()
+ << "type of array projection from component slicing: "
+ << eleTy << ", " << arrTy << '\n');
}
}
-
- // Compute the coordinate using `fir.coordinate_of`, or, if the type has
- // dynamic size, generating the pointer arithmetic.
- auto computeCoordinate = [&](mlir::Value buff, mlir::Value off) {
- mlir::Type refTy = eleRefTy;
- if (fir::hasDynamicSize(eleTy)) {
- if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
- // Scale a simple pointer using dynamic length and offset values.
- auto chTy = fir::CharacterType::getSingleton(charTy.getContext(),
- charTy.getFKind());
- refTy = builder.getRefType(chTy);
- mlir::Type toTy = builder.getRefType(builder.getVarLenSeqTy(chTy));
- buff = builder.createConvert(loc, toTy, buff);
- off = builder.create<mlir::arith::MulIOp>(loc, off, eleSz);
- } else {
- TODO(loc, "PDT offset");
- }
+ arrayOperands.push_back(ArrayOperand{memref, shape, slice});
+ if (destShape.empty())
+ destShape = getShape(arrayOperands.back());
+ if (isBoxValue()) {
+ // Semantics are a reference to a boxed array.
+ // This case just requires that an embox operation be created to box the
+ // value. The value of the box is forwarded in the continuation.
+ mlir::Type reduceTy = reduceRank(arrTy, slice);
+ auto boxTy = fir::BoxType::get(reduceTy);
+ if (components.substring) {
+ // Adjust char length to substring size.
+ fir::CharacterType charTy =
+ fir::factory::CharacterExprHelper::getCharType(reduceTy);
+ auto seqTy = reduceTy.cast<fir::SequenceType>();
+ // TODO: Use a constant for fir.char LEN if we can compute it.
+ boxTy = fir::BoxType::get(
+ fir::SequenceType::get(fir::CharacterType::getUnknownLen(
+ builder.getContext(), charTy.getFKind()),
+ seqTy.getDimension()));
}
- auto coor = builder.create<fir::CoordinateOp>(loc, refTy, buff,
- mlir::ValueRange{off});
- return builder.createConvert(loc, eleRefTy, coor);
- };
-
- // Lambda to lower an abstract array box value.
- auto doAbstractArray = [&](const auto &v) {
- // Compute the array size.
- mlir::Value arrSz = one;
- for (auto ext : v.getExtents())
- arrSz = builder.create<mlir::arith::MulIOp>(loc, arrSz, ext);
-
- // Grow the buffer as needed.
- auto endOff = builder.create<mlir::arith::AddIOp>(loc, off, arrSz);
- mem = growBuffer(mem, endOff, limit, buffSize, eleSz);
-
- // Copy the elements to the buffer.
- mlir::Value byteSz =
- builder.create<mlir::arith::MulIOp>(loc, arrSz, eleSz);
- auto buff = builder.createConvert(loc, fir::HeapType::get(resTy), mem);
- mlir::Value buffi = computeCoordinate(buff, off);
- llvm::SmallVector<mlir::Value> args = fir::runtime::createArguments(
- builder, loc, memcpyType(), buffi, v.getAddr(), byteSz,
- /*volatile=*/builder.createBool(loc, false));
- createCallMemcpy(args);
-
- // Save the incremented buffer position.
- builder.create<fir::StoreOp>(loc, endOff, buffPos);
- };
-
- // Copy a trivial scalar value into the buffer.
- auto doTrivialScalar = [&](const ExtValue &v, mlir::Value len = {}) {
- // Increment the buffer position.
- auto plusOne = builder.create<mlir::arith::AddIOp>(loc, off, one);
-
- // Grow the buffer as needed.
- mem = growBuffer(mem, plusOne, limit, buffSize, eleSz);
-
- // Store the element in the buffer.
- mlir::Value buff =
- builder.createConvert(loc, fir::HeapType::get(resTy), mem);
- auto buffi = builder.create<fir::CoordinateOp>(loc, eleRefTy, buff,
- mlir::ValueRange{off});
- fir::factory::genScalarAssignment(
- builder, loc,
- [&]() -> ExtValue {
- if (len)
- return fir::CharBoxValue(buffi, len);
- return buffi;
- }(),
- v);
- builder.create<fir::StoreOp>(loc, plusOne, buffPos);
- };
-
- // Copy the value.
- exv.match(
- [&](mlir::Value) { doTrivialScalar(exv); },
- [&](const fir::CharBoxValue &v) {
- auto buffer = v.getBuffer();
- if (fir::isa_char(buffer.getType())) {
- doTrivialScalar(exv, eleSz);
- } else {
- // Increment the buffer position.
- auto plusOne = builder.create<mlir::arith::AddIOp>(loc, off, one);
-
- // Grow the buffer as needed.
- mem = growBuffer(mem, plusOne, limit, buffSize, eleSz);
-
- // Store the element in the buffer.
- mlir::Value buff =
- builder.createConvert(loc, fir::HeapType::get(resTy), mem);
- mlir::Value buffi = computeCoordinate(buff, off);
- llvm::SmallVector<mlir::Value> args = fir::runtime::createArguments(
- builder, loc, memcpyType(), buffi, v.getAddr(), eleSz,
- /*volatile=*/builder.createBool(loc, false));
- createCallMemcpy(args);
-
- builder.create<fir::StoreOp>(loc, plusOne, buffPos);
+ mlir::Value embox =
+ memref.getType().isa<fir::BoxType>()
+ ? builder.create<fir::ReboxOp>(loc, boxTy, memref, shape, slice)
+ .getResult()
+ : builder
+ .create<fir::EmboxOp>(loc, boxTy, memref, shape, slice,
+ fir::getTypeParams(extMemref))
+ .getResult();
+ return [=](IterSpace) -> ExtValue { return fir::BoxValue(embox); };
+ }
+ auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
+ if (isReferentiallyOpaque()) {
+ // Semantics are an opaque reference to an array.
+ // This case forwards a continuation that will generate the address
+ // arithmetic to the array element. This does not have copy-in/copy-out
+ // semantics. No attempt to copy the array value will be made during the
+ // interpretation of the Fortran statement.
+ mlir::Type refEleTy = builder.getRefType(eleTy);
+ return [=](IterSpace iters) -> ExtValue {
+ // ArrayCoorOp does not expect zero based indices.
+ llvm::SmallVector<mlir::Value> indices = fir::factory::originateIndices(
+ loc, builder, memref.getType(), shape, iters.iterVec());
+ mlir::Value coor = builder.create<fir::ArrayCoorOp>(
+ loc, refEleTy, memref, shape, slice, indices,
+ fir::getTypeParams(extMemref));
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
+ llvm::SmallVector<mlir::Value> substringBounds;
+ populateBounds(substringBounds, components.substring);
+ if (!substringBounds.empty()) {
+ mlir::Value dstLen = fir::factory::genLenOfCharacter(
+ builder, loc, arrTy.cast<fir::SequenceType>(), memref,
+ fir::getTypeParams(extMemref), iters.iterVec(),
+ substringBounds);
+ fir::CharBoxValue dstChar(coor, dstLen);
+ return fir::factory::CharacterExprHelper{builder, loc}
+ .createSubstring(dstChar, substringBounds);
}
- },
- [&](const fir::ArrayBoxValue &v) { doAbstractArray(v); },
- [&](const fir::CharArrayBoxValue &v) { doAbstractArray(v); },
- [&](const auto &) {
- TODO(loc, "unhandled array constructor expression");
- });
- return mem;
- }
-
- // Lower the expr cases in an ac-value-list.
- template <typename A>
- std::pair<ExtValue, bool>
- genArrayCtorInitializer(const Fortran::evaluate::Expr<A> &x, mlir::Type,
- mlir::Value, mlir::Value, mlir::Value,
- Fortran::lower::StatementContext &stmtCtx) {
- if (isArray(x))
- return {lowerNewArrayExpression(converter, symMap, stmtCtx, toEvExpr(x)),
- /*needCopy=*/true};
- return {asScalar(x), /*needCopy=*/true};
- }
-
- // Lower an ac-implied-do in an ac-value-list.
- template <typename A>
- std::pair<ExtValue, bool>
- genArrayCtorInitializer(const Fortran::evaluate::ImpliedDo<A> &x,
- mlir::Type resTy, mlir::Value mem,
- mlir::Value buffPos, mlir::Value buffSize,
- Fortran::lower::StatementContext &) {
- mlir::Location loc = getLoc();
- mlir::IndexType idxTy = builder.getIndexType();
- mlir::Value lo =
- builder.createConvert(loc, idxTy, fir::getBase(asScalar(x.lower())));
- mlir::Value up =
- builder.createConvert(loc, idxTy, fir::getBase(asScalar(x.upper())));
- mlir::Value step =
- builder.createConvert(loc, idxTy, fir::getBase(asScalar(x.stride())));
- auto seqTy = resTy.template cast<fir::SequenceType>();
- mlir::Type eleTy = fir::unwrapSequenceType(seqTy);
- auto loop =
- builder.create<fir::DoLoopOp>(loc, lo, up, step, /*unordered=*/false,
- /*finalCount=*/false, mem);
- // create a new binding for x.name(), to ac-do-variable, to the iteration
- // value.
- symMap.pushImpliedDoBinding(toStringRef(x.name()), loop.getInductionVar());
- auto insPt = builder.saveInsertionPoint();
- builder.setInsertionPointToStart(loop.getBody());
- // Thread mem inside the loop via loop argument.
- mem = loop.getRegionIterArgs()[0];
-
- mlir::Type eleRefTy = builder.getRefType(eleTy);
-
- // Any temps created in the loop body must be freed inside the loop body.
- stmtCtx.pushScope();
- llvm::Optional<mlir::Value> charLen;
- for (const Fortran::evaluate::ArrayConstructorValue<A> &acv : x.values()) {
- auto [exv, copyNeeded] = std::visit(
- [&](const auto &v) {
- return genArrayCtorInitializer(v, resTy, mem, buffPos, buffSize,
- stmtCtx);
- },
- acv.u);
- mlir::Value eleSz = computeElementSize(exv, eleTy, resTy);
- mem = copyNeeded ? copyNextArrayCtorSection(exv, buffPos, buffSize, mem,
- eleSz, eleTy, eleRefTy, resTy)
- : fir::getBase(exv);
- if (fir::isa_char(seqTy.getEleTy()) && !charLen.hasValue()) {
- charLen = builder.createTemporary(loc, builder.getI64Type());
- mlir::Value castLen =
- builder.createConvert(loc, builder.getI64Type(), fir::getLen(exv));
- builder.create<fir::StoreOp>(loc, castLen, charLen.getValue());
- }
+ }
+ return fir::factory::arraySectionElementToExtendedValue(
+ builder, loc, extMemref, coor, slice);
+ };
}
- stmtCtx.finalize(/*popScope=*/true);
-
- builder.create<fir::ResultOp>(loc, mem);
- builder.restoreInsertionPoint(insPt);
- mem = loop.getResult(0);
- symMap.popImpliedDoBinding();
- llvm::SmallVector<mlir::Value> extents = {
- builder.create<fir::LoadOp>(loc, buffPos).getResult()};
-
- // Convert to extended value.
- if (fir::isa_char(seqTy.getEleTy())) {
- auto len = builder.create<fir::LoadOp>(loc, charLen.getValue());
- return {fir::CharArrayBoxValue{mem, len, extents}, /*needCopy=*/false};
+ auto arrLoad = builder.create<fir::ArrayLoadOp>(
+ loc, arrTy, memref, shape, slice, fir::getTypeParams(extMemref));
+ mlir::Value arrLd = arrLoad.getResult();
+ if (isProjectedCopyInCopyOut()) {
+ // Semantics are projected copy-in copy-out.
+ // The backing store of the destination of an array expression may be
+ // partially modified. These updates are recorded in FIR by forwarding a
+ // continuation that generates an `array_update` Op. The destination is
+ // always loaded at the beginning of the statement and merged at the
+ // end.
+ destination = arrLoad;
+ auto lambda = ccStoreToDest.hasValue()
+ ? ccStoreToDest.getValue()
+ : defaultStoreToDestination(components.substring);
+ return [=](IterSpace iters) -> ExtValue { return lambda(iters); };
}
- return {fir::ArrayBoxValue{mem, extents}, /*needCopy=*/false};
- }
-
- // To simplify the handling and interaction between the various cases, array
- // constructors are always lowered to the incremental construction code
- // pattern, even if the extent of the array value is constant. After the
- // MemToReg pass and constant folding, the optimizer should be able to
- // determine that all the buffer overrun tests are false when the
- // incremental construction wasn't actually required.
- template <typename A>
- CC genarr(const Fortran::evaluate::ArrayConstructor<A> &x) {
- mlir::Location loc = getLoc();
- auto evExpr = toEvExpr(x);
- mlir::Type resTy = translateSomeExprToFIRType(converter, evExpr);
- mlir::IndexType idxTy = builder.getIndexType();
- auto seqTy = resTy.template cast<fir::SequenceType>();
- mlir::Type eleTy = fir::unwrapSequenceType(resTy);
- mlir::Value buffSize = builder.createTemporary(loc, idxTy, ".buff.size");
- mlir::Value zero = builder.createIntegerConstant(loc, idxTy, 0);
- mlir::Value buffPos = builder.createTemporary(loc, idxTy, ".buff.pos");
- builder.create<fir::StoreOp>(loc, zero, buffPos);
- // Allocate space for the array to be constructed.
- mlir::Value mem;
- if (fir::hasDynamicSize(resTy)) {
- if (fir::hasDynamicSize(eleTy)) {
- // The size of each element may depend on a general expression. Defer
- // creating the buffer until after the expression is evaluated.
- mem = builder.createNullConstant(loc, builder.getRefType(eleTy));
- builder.create<fir::StoreOp>(loc, zero, buffSize);
- } else {
- mlir::Value initBuffSz =
- builder.createIntegerConstant(loc, idxTy, clInitialBufferSize);
- mem = builder.create<fir::AllocMemOp>(
- loc, eleTy, /*typeparams=*/llvm::None, initBuffSz);
- builder.create<fir::StoreOp>(loc, initBuffSz, buffSize);
- }
- } else {
- mem = builder.create<fir::AllocMemOp>(loc, resTy);
- int64_t buffSz = 1;
- for (auto extent : seqTy.getShape())
- buffSz *= extent;
- mlir::Value initBuffSz =
- builder.createIntegerConstant(loc, idxTy, buffSz);
- builder.create<fir::StoreOp>(loc, initBuffSz, buffSize);
+ if (isCustomCopyInCopyOut()) {
+ // Create an array_modify to get the LHS element address and indicate
+ // the assignment, the actual assignment must be implemented in
+ // ccStoreToDest.
+ destination = arrLoad;
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value innerArg = iters.innerArgument();
+ mlir::Type resTy = innerArg.getType();
+ mlir::Type eleTy = fir::applyPathToType(resTy, iters.iterVec());
+ mlir::Type refEleTy =
+ fir::isa_ref_type(eleTy) ? eleTy : builder.getRefType(eleTy);
+ auto arrModify = builder.create<fir::ArrayModifyOp>(
+ loc, mlir::TypeRange{refEleTy, resTy}, innerArg, iters.iterVec(),
+ destination.getTypeparams());
+ return abstractArrayExtValue(arrModify.getResult(1));
+ };
}
- // Compute size of element
- mlir::Type eleRefTy = builder.getRefType(eleTy);
-
- // Populate the buffer with the elements, growing as necessary.
- llvm::Optional<mlir::Value> charLen;
- for (const auto &expr : x) {
- auto [exv, copyNeeded] = std::visit(
- [&](const auto &e) {
- return genArrayCtorInitializer(e, resTy, mem, buffPos, buffSize,
- stmtCtx);
- },
- expr.u);
- mlir::Value eleSz = computeElementSize(exv, eleTy, resTy);
- mem = copyNeeded ? copyNextArrayCtorSection(exv, buffPos, buffSize, mem,
- eleSz, eleTy, eleRefTy, resTy)
- : fir::getBase(exv);
- if (fir::isa_char(seqTy.getEleTy()) && !charLen.hasValue()) {
- charLen = builder.createTemporary(loc, builder.getI64Type());
- mlir::Value castLen =
- builder.createConvert(loc, builder.getI64Type(), fir::getLen(exv));
- builder.create<fir::StoreOp>(loc, castLen, charLen.getValue());
- }
+ if (isCopyInCopyOut()) {
+ // Semantics are copy-in copy-out.
+ // The continuation simply forwards the result of the `array_load` Op,
+ // which is the value of the array as it was when loaded. All data
+ // references with rank > 0 in an array expression typically have
+ // copy-in copy-out semantics.
+ return [=](IterSpace) -> ExtValue { return arrLd; };
}
- mem = builder.createConvert(loc, fir::HeapType::get(resTy), mem);
- llvm::SmallVector<mlir::Value> extents = {
- builder.create<fir::LoadOp>(loc, buffPos)};
-
- // Cleanup the temporary.
- fir::FirOpBuilder *bldr = &converter.getFirOpBuilder();
- stmtCtx.attachCleanup(
- [bldr, loc, mem]() { bldr->create<fir::FreeMemOp>(loc, mem); });
-
- // Return the continuation.
- if (fir::isa_char(seqTy.getEleTy())) {
- if (charLen.hasValue()) {
- auto len = builder.create<fir::LoadOp>(loc, charLen.getValue());
- return genarr(fir::CharArrayBoxValue{mem, len, extents});
- }
- return genarr(fir::CharArrayBoxValue{mem, zero, extents});
+ mlir::Operation::operand_range arrLdTypeParams = arrLoad.getTypeparams();
+ if (isValueAttribute()) {
+ // Semantics are value attribute.
+ // Here the continuation will `array_fetch` a value from an array and
+ // then store that value in a temporary. One can thus imitate pass by
+ // value even when the call is pass by reference.
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value base;
+ mlir::Type eleTy = fir::applyPathToType(arrTy, iters.iterVec());
+ if (isAdjustedArrayElementType(eleTy)) {
+ mlir::Type eleRefTy = builder.getRefType(eleTy);
+ base = builder.create<fir::ArrayAccessOp>(
+ loc, eleRefTy, arrLd, iters.iterVec(), arrLdTypeParams);
+ } else {
+ base = builder.create<fir::ArrayFetchOp>(
+ loc, eleTy, arrLd, iters.iterVec(), arrLdTypeParams);
+ }
+ mlir::Value temp = builder.createTemporary(
+ loc, base.getType(),
+ llvm::ArrayRef<mlir::NamedAttribute>{
+ Fortran::lower::getAdaptToByRefAttr(builder)});
+ builder.create<fir::StoreOp>(loc, base, temp);
+ return fir::factory::arraySectionElementToExtendedValue(
+ builder, loc, extMemref, temp, slice);
+ };
}
- return genarr(fir::ArrayBoxValue{mem, extents});
- }
-
- CC genarr(const Fortran::evaluate::ImpliedDoIndex &) {
- TODO(getLoc(), "genarr ImpliedDoIndex");
- }
-
- CC genarr(const Fortran::evaluate::TypeParamInquiry &x) {
- TODO(getLoc(), "genarr TypeParamInquiry");
- }
-
- CC genarr(const Fortran::evaluate::DescriptorInquiry &x) {
- TODO(getLoc(), "genarr DescriptorInquiry");
- }
-
- CC genarr(const Fortran::evaluate::StructureConstructor &x) {
- TODO(getLoc(), "genarr StructureConstructor");
- }
-
- //===--------------------------------------------------------------------===//
- // LOCICAL operators (.NOT., .AND., .EQV., etc.)
- //===--------------------------------------------------------------------===//
-
- template <int KIND>
- CC genarr(const Fortran::evaluate::Not<KIND> &x) {
- mlir::Location loc = getLoc();
- mlir::IntegerType i1Ty = builder.getI1Type();
- auto lambda = genarr(x.left());
- mlir::Value truth = builder.createBool(loc, true);
+ // In the default case, the array reference forwards an `array_fetch` or
+ // `array_access` Op in the continuation.
return [=](IterSpace iters) -> ExtValue {
- mlir::Value logical = fir::getBase(lambda(iters));
- mlir::Value val = builder.createConvert(loc, i1Ty, logical);
- return builder.create<mlir::arith::XOrIOp>(loc, val, truth);
+ mlir::Type eleTy = fir::applyPathToType(arrTy, iters.iterVec());
+ if (isAdjustedArrayElementType(eleTy)) {
+ mlir::Type eleRefTy = builder.getRefType(eleTy);
+ mlir::Value arrayOp = builder.create<fir::ArrayAccessOp>(
+ loc, eleRefTy, arrLd, iters.iterVec(), arrLdTypeParams);
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
+ llvm::SmallVector<mlir::Value> substringBounds;
+ populateBounds(substringBounds, components.substring);
+ if (!substringBounds.empty()) {
+ mlir::Value dstLen = fir::factory::genLenOfCharacter(
+ builder, loc, arrLoad, iters.iterVec(), substringBounds);
+ fir::CharBoxValue dstChar(arrayOp, dstLen);
+ return fir::factory::CharacterExprHelper{builder, loc}
+ .createSubstring(dstChar, substringBounds);
+ }
+ }
+ return fir::factory::arraySectionElementToExtendedValue(
+ builder, loc, extMemref, arrayOp, slice);
+ }
+ auto arrFetch = builder.create<fir::ArrayFetchOp>(
+ loc, eleTy, arrLd, iters.iterVec(), arrLdTypeParams);
+ return fir::factory::arraySectionElementToExtendedValue(
+ builder, loc, extMemref, arrFetch, slice);
};
}
- template <typename OP, typename A>
- CC createBinaryBoolOp(const A &x) {
- mlir::Location loc = getLoc();
- mlir::IntegerType i1Ty = builder.getI1Type();
- auto lf = genarr(x.left());
- auto rf = genarr(x.right());
- return [=](IterSpace iters) -> ExtValue {
- mlir::Value left = fir::getBase(lf(iters));
- mlir::Value right = fir::getBase(rf(iters));
- mlir::Value lhs = builder.createConvert(loc, i1Ty, left);
- mlir::Value rhs = builder.createConvert(loc, i1Ty, right);
- return builder.create<OP>(loc, lhs, rhs);
- };
+
+  /// Given an optional fir.box, returns a fir.box that is the original one if
+  /// it is present and otherwise an unallocated box.
+  /// An absent fir.box is implemented as a null pointer descriptor. Generated
+  /// code may need to unconditionally read a fir.box that can be absent.
+  /// This helper allows creating a fir.box that can be read in all cases
+  /// outside of a fir.if (isPresent) region. However, the usages of the value
+  /// read from such a box should still only be done in a fir.if (isPresent)
+  /// region.
+ static fir::ExtendedValue
+ absentBoxToUnalllocatedBox(fir::FirOpBuilder &builder, mlir::Location loc,
+ const fir::ExtendedValue &exv,
+ mlir::Value isPresent) {
+ mlir::Value box = fir::getBase(exv);
+ mlir::Type boxType = box.getType();
+ assert(boxType.isa<fir::BoxType>() && "argument must be a fir.box");
+ mlir::Value emptyBox =
+ fir::factory::createUnallocatedBox(builder, loc, boxType, llvm::None);
+ auto safeToReadBox =
+ builder.create<mlir::arith::SelectOp>(loc, isPresent, box, emptyBox);
+ return fir::substBase(exv, safeToReadBox);
}
- template <typename OP, typename A>
- CC createCompareBoolOp(mlir::arith::CmpIPredicate pred, const A &x) {
+
+ std::tuple<CC, mlir::Value, mlir::Type>
+ genOptionalArrayFetch(const Fortran::lower::SomeExpr &expr) {
+ assert(expr.Rank() > 0 && "expr must be an array");
mlir::Location loc = getLoc();
- mlir::IntegerType i1Ty = builder.getI1Type();
- auto lf = genarr(x.left());
- auto rf = genarr(x.right());
- return [=](IterSpace iters) -> ExtValue {
- mlir::Value left = fir::getBase(lf(iters));
- mlir::Value right = fir::getBase(rf(iters));
- mlir::Value lhs = builder.createConvert(loc, i1Ty, left);
- mlir::Value rhs = builder.createConvert(loc, i1Ty, right);
- return builder.create<OP>(loc, pred, lhs, rhs);
- };
- }
- template <int KIND>
- CC genarr(const Fortran::evaluate::LogicalOperation<KIND> &x) {
- switch (x.logicalOperator) {
- case Fortran::evaluate::LogicalOperator::And:
- return createBinaryBoolOp<mlir::arith::AndIOp>(x);
- case Fortran::evaluate::LogicalOperator::Or:
- return createBinaryBoolOp<mlir::arith::OrIOp>(x);
- case Fortran::evaluate::LogicalOperator::Eqv:
- return createCompareBoolOp<mlir::arith::CmpIOp>(
- mlir::arith::CmpIPredicate::eq, x);
- case Fortran::evaluate::LogicalOperator::Neqv:
- return createCompareBoolOp<mlir::arith::CmpIOp>(
- mlir::arith::CmpIPredicate::ne, x);
- case Fortran::evaluate::LogicalOperator::Not:
- llvm_unreachable(".NOT. handled elsewhere");
+ ExtValue optionalArg = asInquired(expr);
+ mlir::Value isPresent = genActualIsPresentTest(builder, loc, optionalArg);
+ // Generate an array load and access to an array that may be an absent
+ // optional or an unallocated optional.
+ mlir::Value base = getBase(optionalArg);
+ const bool hasOptionalAttr =
+ fir::valueHasFirAttribute(base, fir::getOptionalAttrName());
+ mlir::Type baseType = fir::unwrapRefType(base.getType());
+ const bool isBox = baseType.isa<fir::BoxType>();
+ const bool isAllocOrPtr = Fortran::evaluate::IsAllocatableOrPointerObject(
+ expr, converter.getFoldingContext());
+ mlir::Type arrType = fir::unwrapPassByRefType(baseType);
+ mlir::Type eleType = fir::unwrapSequenceType(arrType);
+ ExtValue exv = optionalArg;
+ if (hasOptionalAttr && isBox && !isAllocOrPtr) {
+ // Elemental arguments cannot be allocatable or pointer objects (C15100).
+ // Hence, per 15.5.2.12 3 (8) and (9), the provided Allocatable and
+ // Pointer optional arrays cannot be absent. The only kind of entities
+ // that can get here are optional assumed shape and polymorphic entities.
+ exv = absentBoxToUnalllocatedBox(builder, loc, exv, isPresent);
}
- llvm_unreachable("unhandled case");
- }
+ // All the properties can be read from any fir.box but the read values may
+ // be undefined and should only be used inside a fir.if (canBeRead) region.
+ if (const auto *mutableBox = exv.getBoxOf<fir::MutableBoxValue>())
+ exv = fir::factory::genMutableBoxRead(builder, loc, *mutableBox);
- //===--------------------------------------------------------------------===//
- // Relational operators (<, <=, ==, etc.)
- //===--------------------------------------------------------------------===//
+ mlir::Value memref = fir::getBase(exv);
+ mlir::Value shape = builder.createShape(loc, exv);
+ mlir::Value noSlice;
+ auto arrLoad = builder.create<fir::ArrayLoadOp>(
+ loc, arrType, memref, shape, noSlice, fir::getTypeParams(exv));
+ mlir::Operation::operand_range arrLdTypeParams = arrLoad.getTypeparams();
+ mlir::Value arrLd = arrLoad.getResult();
+ // Mark the load to tell later passes it is unsafe to use this array_load
+ // shape unconditionally.
+ arrLoad->setAttr(fir::getOptionalAttrName(), builder.getUnitAttr());
- template <typename OP, typename PRED, typename A>
- CC createCompareOp(PRED pred, const A &x) {
- mlir::Location loc = getLoc();
- auto lf = genarr(x.left());
- auto rf = genarr(x.right());
- return [=](IterSpace iters) -> ExtValue {
- mlir::Value lhs = fir::getBase(lf(iters));
- mlir::Value rhs = fir::getBase(rf(iters));
- return builder.create<OP>(loc, pred, lhs, rhs);
+ // Place the array as optional on the arrayOperands stack so that its
+ // shape will only be used as a fallback to induce the implicit loop nest
+ // (that is if there is no non optional array arguments).
+ arrayOperands.push_back(
+ ArrayOperand{memref, shape, noSlice, /*mayBeAbsent=*/true});
+
+ // By value semantics.
+ auto cc = [=](IterSpace iters) -> ExtValue {
+ auto arrFetch = builder.create<fir::ArrayFetchOp>(
+ loc, eleType, arrLd, iters.iterVec(), arrLdTypeParams);
+ return fir::factory::arraySectionElementToExtendedValue(
+ builder, loc, exv, arrFetch, noSlice);
};
+ return {cc, isPresent, eleType};
}
- template <typename A>
- CC createCompareCharOp(mlir::arith::CmpIPredicate pred, const A &x) {
+
+ /// Generate a continuation to pass \p expr to an OPTIONAL argument of an
+ /// elemental procedure. This is meant to handle the cases where \p expr might
+ /// be dynamically absent (i.e. when it is a POINTER, an ALLOCATABLE or an
+ /// OPTIONAL variable). If \p expr is guaranteed to be present, genarr() can
+ /// directly be called instead.
+ CC genarrForwardOptionalArgumentToCall(const Fortran::lower::SomeExpr &expr) {
mlir::Location loc = getLoc();
- auto lf = genarr(x.left());
- auto rf = genarr(x.right());
- return [=](IterSpace iters) -> ExtValue {
- auto lhs = lf(iters);
- auto rhs = rf(iters);
- return fir::runtime::genCharCompare(builder, loc, pred, lhs, rhs);
- };
- }
- template <int KIND>
- CC genarr(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
- Fortran::common::TypeCategory::Integer, KIND>> &x) {
- return createCompareOp<mlir::arith::CmpIOp>(translateRelational(x.opr), x);
- }
- template <int KIND>
- CC genarr(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
- Fortran::common::TypeCategory::Character, KIND>> &x) {
- return createCompareCharOp(translateRelational(x.opr), x);
- }
- template <int KIND>
- CC genarr(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
- Fortran::common::TypeCategory::Real, KIND>> &x) {
- return createCompareOp<mlir::arith::CmpFOp>(translateFloatRelational(x.opr),
- x);
- }
- template <int KIND>
- CC genarr(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
- Fortran::common::TypeCategory::Complex, KIND>> &x) {
- return createCompareOp<fir::CmpcOp>(translateFloatRelational(x.opr), x);
- }
- CC genarr(
- const Fortran::evaluate::Relational<Fortran::evaluate::SomeType> &r) {
- return std::visit([&](const auto &x) { return genarr(x); }, r.u);
- }
+ // Only by-value numerical and logical so far.
+ if (semant != ConstituentSemantics::RefTransparent)
+ TODO(loc, "optional arguments in user defined elemental procedures");
- template <typename A>
- CC genarr(const Fortran::evaluate::Designator<A> &des) {
- ComponentPath components(des.Rank() > 0);
- return std::visit([&](const auto &x) { return genarr(x, components); },
- des.u);
- }
+ // Handle scalar argument case (the if-then-else is generated outside of the
+ // implicit loop nest).
+ if (expr.Rank() == 0) {
+ ExtValue optionalArg = asInquired(expr);
+ mlir::Value isPresent = genActualIsPresentTest(builder, loc, optionalArg);
+ mlir::Value elementValue =
+ fir::getBase(genOptionalValue(builder, loc, optionalArg, isPresent));
+ return [=](IterSpace iters) -> ExtValue { return elementValue; };
+ }
- template <typename T>
- CC genarr(const Fortran::evaluate::FunctionRef<T> &funRef) {
- // Note that it's possible that the function being called returns either an
- // array or a scalar. In the first case, use the element type of the array.
- return genProcRef(
- funRef, fir::unwrapSequenceType(converter.genType(toEvExpr(funRef))));
+ CC cc;
+ mlir::Value isPresent;
+ mlir::Type eleType;
+ std::tie(cc, isPresent, eleType) = genOptionalArrayFetch(expr);
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value elementValue =
+ builder
+ .genIfOp(loc, {eleType}, isPresent,
+ /*withElseRegion=*/true)
+ .genThen([&]() {
+ builder.create<fir::ResultOp>(loc, fir::getBase(cc(iters)));
+ })
+ .genElse([&]() {
+ mlir::Value zero =
+ fir::factory::createZeroValue(builder, loc, eleType);
+ builder.create<fir::ResultOp>(loc, zero);
+ })
+ .getResults()[0];
+ return elementValue;
+ };
}
- //===-------------------------------------------------------------------===//
- // Array data references in an explicit iteration space.
- //
- // Use the base array that was loaded before the loop nest.
- //===-------------------------------------------------------------------===//
-
- /// Lower the path (`revPath`, in reverse) to be appended to an array_fetch or
- /// array_update op. \p ty is the initial type of the array
- /// (reference). Returns the type of the element after application of the
- /// path in \p components.
- ///
- /// TODO: This needs to deal with array's with initial bounds other than 1.
- /// TODO: Thread type parameters correctly.
- mlir::Type lowerPath(const ExtValue &arrayExv, ComponentPath &components) {
- mlir::Location loc = getLoc();
- mlir::Type ty = fir::getBase(arrayExv).getType();
- auto &revPath = components.reversePath;
- ty = fir::unwrapPassByRefType(ty);
- bool prefix = true;
- auto addComponent = [&](mlir::Value v) {
- if (prefix)
- components.prefixComponents.push_back(v);
- else
- components.suffixComponents.push_back(v);
- };
- mlir::IndexType idxTy = builder.getIndexType();
- mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
- bool atBase = true;
- auto saveSemant = semant;
- if (isProjectedCopyInCopyOut())
- semant = ConstituentSemantics::RefTransparent;
- for (const auto &v : llvm::reverse(revPath)) {
- std::visit(
- Fortran::common::visitors{
- [&](const ImplicitSubscripts &) {
- prefix = false;
- ty = fir::unwrapSequenceType(ty);
- },
- [&](const Fortran::evaluate::ComplexPart *x) {
- assert(!prefix && "complex part must be at end");
- mlir::Value offset = builder.createIntegerConstant(
- loc, builder.getI32Type(),
- x->part() == Fortran::evaluate::ComplexPart::Part::RE ? 0
- : 1);
- components.suffixComponents.push_back(offset);
- ty = fir::applyPathToType(ty, mlir::ValueRange{offset});
- },
- [&](const Fortran::evaluate::ArrayRef *x) {
- if (Fortran::lower::isRankedArrayAccess(*x)) {
- genSliceIndices(components, arrayExv, *x, atBase);
- } else {
- // Array access where the expressions are scalar and cannot
- // depend upon the implied iteration space.
- unsigned ssIndex = 0u;
- for (const auto &ss : x->subscript()) {
- std::visit(
- Fortran::common::visitors{
- [&](const Fortran::evaluate::
- IndirectSubscriptIntegerExpr &ie) {
- const auto &e = ie.value();
- if (isArray(e))
- fir::emitFatalError(
- loc,
- "multiple components along single path "
- "generating array subexpressions");
- // Lower scalar index expression, append it to
- // subs.
- mlir::Value subscriptVal =
- fir::getBase(asScalarArray(e));
- // arrayExv is the base array. It needs to reflect
- // the current array component instead.
- // FIXME: must use lower bound of this component,
- // not just the constant 1.
- mlir::Value lb =
- atBase ? fir::factory::readLowerBound(
- builder, loc, arrayExv, ssIndex,
- one)
- : one;
- mlir::Value val = builder.createConvert(
- loc, idxTy, subscriptVal);
- mlir::Value ivAdj =
- builder.create<mlir::arith::SubIOp>(
- loc, idxTy, val, lb);
- addComponent(
- builder.createConvert(loc, idxTy, ivAdj));
- },
- [&](const auto &) {
- fir::emitFatalError(
- loc, "multiple components along single path "
- "generating array subexpressions");
- }},
- ss.u);
- ssIndex++;
- }
- }
- ty = fir::unwrapSequenceType(ty);
- },
- [&](const Fortran::evaluate::Component *x) {
- auto fieldTy = fir::FieldType::get(builder.getContext());
- llvm::StringRef name = toStringRef(getLastSym(*x).name());
- auto recTy = ty.cast<fir::RecordType>();
- ty = recTy.getType(name);
- auto fld = builder.create<fir::FieldIndexOp>(
- loc, fieldTy, name, recTy, fir::getTypeParams(arrayExv));
- addComponent(fld);
- }},
- v);
- atBase = false;
+ /// Reduce the rank of an array to be boxed based on the slice's operands.
+ static mlir::Type reduceRank(mlir::Type arrTy, mlir::Value slice) {
+ if (slice) {
+ auto slOp = mlir::dyn_cast<fir::SliceOp>(slice.getDefiningOp());
+ assert(slOp && "expected slice op");
+ auto seqTy = arrTy.dyn_cast<fir::SequenceType>();
+ assert(seqTy && "expected array type");
+ mlir::Operation::operand_range triples = slOp.getTriples();
+ fir::SequenceType::Shape shape;
+ // reduce the rank for each invariant dimension
+ for (unsigned i = 1, end = triples.size(); i < end; i += 3)
+ if (!mlir::isa_and_nonnull<fir::UndefOp>(triples[i].getDefiningOp()))
+ shape.push_back(fir::SequenceType::getUnknownExtent());
+ return fir::SequenceType::get(shape, seqTy.getEleTy());
}
- semant = saveSemant;
- ty = fir::unwrapSequenceType(ty);
- components.applied = true;
- return ty;
+ // not sliced, so no change in rank
+ return arrTy;
}
- llvm::SmallVector<mlir::Value> genSubstringBounds(ComponentPath &components) {
- llvm::SmallVector<mlir::Value> result;
- if (components.substring)
- populateBounds(result, components.substring);
- return result;
+ /// Example: <code>array%RE</code>
+ CC genarr(const Fortran::evaluate::ComplexPart &x,
+ ComponentPath &components) {
+ components.reversePath.push_back(&x);
+ return genarr(x.complex(), components);
}
- CC applyPathToArrayLoad(fir::ArrayLoadOp load, ComponentPath &components) {
+ template <typename A>
+ CC genSlicePath(const A &x, ComponentPath &components) {
+ return genarr(x, components);
+ }
+
+ CC genarr(const Fortran::evaluate::StaticDataObject::Pointer &,
+ ComponentPath &components) {
+ fir::emitFatalError(getLoc(), "substring of static array object");
+ }
+
+ /// Substrings (see 9.4.1)
+ CC genarr(const Fortran::evaluate::Substring &x, ComponentPath &components) {
+ components.substring = &x;
+ return std::visit([&](const auto &v) { return genarr(v, components); },
+ x.parent());
+ }
+
+ template <typename T>
+ CC genarr(const Fortran::evaluate::FunctionRef<T> &funRef) {
+ // Note that it's possible that the function being called returns either an
+ // array or a scalar. In the first case, use the element type of the array.
+ return genProcRef(
+ funRef, fir::unwrapSequenceType(converter.genType(toEvExpr(funRef))));
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Array construction
+ //===--------------------------------------------------------------------===//
+
+ /// Target agnostic computation of the size of an element in the array.
+ /// Returns the size in bytes with type `index` or a null Value if the element
+ /// size is not constant.
+ mlir::Value computeElementSize(const ExtValue &exv, mlir::Type eleTy,
+ mlir::Type resTy) {
mlir::Location loc = getLoc();
- auto revPath = components.reversePath;
- fir::ExtendedValue arrayExv =
- arrayLoadExtValue(builder, loc, load, {}, load);
- mlir::Type eleTy = lowerPath(arrayExv, components);
- auto currentPC = components.pc;
- auto pc = [=, prefix = components.prefixComponents,
- suffix = components.suffixComponents](IterSpace iters) {
- IterationSpace newIters = currentPC(iters);
- // Add path prefix and suffix.
- IterationSpace addIters(newIters, prefix, suffix);
- return addIters;
- };
- components.pc = [=](IterSpace iters) { return iters; };
- llvm::SmallVector<mlir::Value> substringBounds =
- genSubstringBounds(components);
- if (isProjectedCopyInCopyOut()) {
- destination = load;
- auto lambda = [=, esp = this->explicitSpace](IterSpace iters) mutable {
- mlir::Value innerArg = esp->findArgumentOfLoad(load);
- if (isAdjustedArrayElementType(eleTy)) {
- mlir::Type eleRefTy = builder.getRefType(eleTy);
- auto arrayOp = builder.create<fir::ArrayAccessOp>(
- loc, eleRefTy, innerArg, iters.iterVec(), load.getTypeparams());
- if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
- mlir::Value dstLen = fir::factory::genLenOfCharacter(
- builder, loc, load, iters.iterVec(), substringBounds);
- fir::ArrayAmendOp amend = createCharArrayAmend(
- loc, builder, arrayOp, dstLen, iters.elementExv(), innerArg,
- substringBounds);
- return arrayLoadExtValue(builder, loc, load, iters.iterVec(), amend,
- dstLen);
- } else if (fir::isa_derived(eleTy)) {
- fir::ArrayAmendOp amend =
- createDerivedArrayAmend(loc, load, builder, arrayOp,
- iters.elementExv(), eleTy, innerArg);
- return arrayLoadExtValue(builder, loc, load, iters.iterVec(),
- amend);
- }
- assert(eleTy.isa<fir::SequenceType>());
- TODO(loc, "array (as element) assignment");
+ mlir::IndexType idxTy = builder.getIndexType();
+ mlir::Value multiplier = builder.createIntegerConstant(loc, idxTy, 1);
+ if (fir::hasDynamicSize(eleTy)) {
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
+ // Array of char with dynamic length parameter. Downcast to an array
+ // of singleton char, and scale by the len type parameter from
+ // `exv`.
+ exv.match(
+ [&](const fir::CharBoxValue &cb) { multiplier = cb.getLen(); },
+ [&](const fir::CharArrayBoxValue &cb) { multiplier = cb.getLen(); },
+ [&](const fir::BoxValue &box) {
+ multiplier = fir::factory::CharacterExprHelper(builder, loc)
+ .readLengthFromBox(box.getAddr());
+ },
+ [&](const fir::MutableBoxValue &box) {
+ multiplier = fir::factory::CharacterExprHelper(builder, loc)
+ .readLengthFromBox(box.getAddr());
+ },
+ [&](const auto &) {
+ fir::emitFatalError(loc,
+ "array constructor element has unknown size");
+ });
+ fir::CharacterType newEleTy = fir::CharacterType::getSingleton(
+ eleTy.getContext(), charTy.getFKind());
+ if (auto seqTy = resTy.dyn_cast<fir::SequenceType>()) {
+ assert(eleTy == seqTy.getEleTy());
+ resTy = fir::SequenceType::get(seqTy.getShape(), newEleTy);
}
- mlir::Value castedElement =
- builder.createConvert(loc, eleTy, iters.getElement());
- auto update = builder.create<fir::ArrayUpdateOp>(
- loc, innerArg.getType(), innerArg, castedElement, iters.iterVec(),
- load.getTypeparams());
- return arrayLoadExtValue(builder, loc, load, iters.iterVec(), update);
- };
- return [=](IterSpace iters) mutable { return lambda(pc(iters)); };
- }
- if (isCustomCopyInCopyOut()) {
- // Create an array_modify to get the LHS element address and indicate
- // the assignment, and create the call to the user defined assignment.
- destination = load;
- auto lambda = [=](IterSpace iters) mutable {
- mlir::Value innerArg = explicitSpace->findArgumentOfLoad(load);
- mlir::Type refEleTy =
- fir::isa_ref_type(eleTy) ? eleTy : builder.getRefType(eleTy);
- auto arrModify = builder.create<fir::ArrayModifyOp>(
- loc, mlir::TypeRange{refEleTy, innerArg.getType()}, innerArg,
- iters.iterVec(), load.getTypeparams());
- return arrayLoadExtValue(builder, loc, load, iters.iterVec(),
- arrModify.getResult(1));
- };
- return [=](IterSpace iters) mutable { return lambda(pc(iters)); };
+ eleTy = newEleTy;
+ } else {
+ TODO(loc, "dynamic sized type");
+ }
}
- auto lambda = [=, semant = this->semant](IterSpace iters) mutable {
- if (semant == ConstituentSemantics::RefOpaque ||
- isAdjustedArrayElementType(eleTy)) {
- mlir::Type resTy = builder.getRefType(eleTy);
- // Use array element reference semantics.
- auto access = builder.create<fir::ArrayAccessOp>(
- loc, resTy, load, iters.iterVec(), load.getTypeparams());
- mlir::Value newBase = access;
- if (fir::isa_char(eleTy)) {
- mlir::Value dstLen = fir::factory::genLenOfCharacter(
- builder, loc, load, iters.iterVec(), substringBounds);
- if (!substringBounds.empty()) {
- fir::CharBoxValue charDst{access, dstLen};
- fir::factory::CharacterExprHelper helper{builder, loc};
- charDst = helper.createSubstring(charDst, substringBounds);
- newBase = charDst.getAddr();
- }
- return arrayLoadExtValue(builder, loc, load, iters.iterVec(), newBase,
- dstLen);
+ mlir::Type eleRefTy = builder.getRefType(eleTy);
+ mlir::Type resRefTy = builder.getRefType(resTy);
+ mlir::Value nullPtr = builder.createNullConstant(loc, resRefTy);
+ auto offset = builder.create<fir::CoordinateOp>(
+ loc, eleRefTy, nullPtr, mlir::ValueRange{multiplier});
+ return builder.createConvert(loc, idxTy, offset);
+ }
+
+ /// Get the function signature of the LLVM memcpy intrinsic.
+ mlir::FunctionType memcpyType() {
+ return fir::factory::getLlvmMemcpy(builder).getFunctionType();
+ }
+
+ /// Create a call to the LLVM memcpy intrinsic.
+ void createCallMemcpy(llvm::ArrayRef<mlir::Value> args) {
+ mlir::Location loc = getLoc();
+ mlir::FuncOp memcpyFunc = fir::factory::getLlvmMemcpy(builder);
+ mlir::SymbolRefAttr funcSymAttr =
+ builder.getSymbolRefAttr(memcpyFunc.getName());
+ mlir::FunctionType funcTy = memcpyFunc.getFunctionType();
+ builder.create<fir::CallOp>(loc, funcTy.getResults(), funcSymAttr, args);
+ }
+
+ // Construct code to check for a buffer overrun and realloc the buffer when
+ // space is depleted. This is done between each item in the ac-value-list.
+ mlir::Value growBuffer(mlir::Value mem, mlir::Value needed,
+ mlir::Value bufferSize, mlir::Value buffSize,
+ mlir::Value eleSz) {
+ mlir::Location loc = getLoc();
+ mlir::FuncOp reallocFunc = fir::factory::getRealloc(builder);
+ auto cond = builder.create<mlir::arith::CmpIOp>(
+ loc, mlir::arith::CmpIPredicate::sle, bufferSize, needed);
+ auto ifOp = builder.create<fir::IfOp>(loc, mem.getType(), cond,
+ /*withElseRegion=*/true);
+ auto insPt = builder.saveInsertionPoint();
+ builder.setInsertionPointToStart(&ifOp.getThenRegion().front());
+ // Not enough space, resize the buffer.
+ mlir::IndexType idxTy = builder.getIndexType();
+ mlir::Value two = builder.createIntegerConstant(loc, idxTy, 2);
+ auto newSz = builder.create<mlir::arith::MulIOp>(loc, needed, two);
+ builder.create<fir::StoreOp>(loc, newSz, buffSize);
+ mlir::Value byteSz = builder.create<mlir::arith::MulIOp>(loc, newSz, eleSz);
+ mlir::SymbolRefAttr funcSymAttr =
+ builder.getSymbolRefAttr(reallocFunc.getName());
+ mlir::FunctionType funcTy = reallocFunc.getFunctionType();
+ auto newMem = builder.create<fir::CallOp>(
+ loc, funcTy.getResults(), funcSymAttr,
+ llvm::ArrayRef<mlir::Value>{
+ builder.createConvert(loc, funcTy.getInputs()[0], mem),
+ builder.createConvert(loc, funcTy.getInputs()[1], byteSz)});
+ mlir::Value castNewMem =
+ builder.createConvert(loc, mem.getType(), newMem.getResult(0));
+ builder.create<fir::ResultOp>(loc, castNewMem);
+ builder.setInsertionPointToStart(&ifOp.getElseRegion().front());
+ // Otherwise, just forward the buffer.
+ builder.create<fir::ResultOp>(loc, mem);
+ builder.restoreInsertionPoint(insPt);
+ return ifOp.getResult(0);
+ }
+
+ /// Copy the next value (or vector of values) into the array being
+ /// constructed.
+ mlir::Value copyNextArrayCtorSection(const ExtValue &exv, mlir::Value buffPos,
+ mlir::Value buffSize, mlir::Value mem,
+ mlir::Value eleSz, mlir::Type eleTy,
+ mlir::Type eleRefTy, mlir::Type resTy) {
+ mlir::Location loc = getLoc();
+ auto off = builder.create<fir::LoadOp>(loc, buffPos);
+ auto limit = builder.create<fir::LoadOp>(loc, buffSize);
+ mlir::IndexType idxTy = builder.getIndexType();
+ mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
+
+ if (fir::isRecordWithAllocatableMember(eleTy))
+ TODO(loc, "deep copy on allocatable members");
+
+ if (!eleSz) {
+ // Compute the element size at runtime.
+ assert(fir::hasDynamicSize(eleTy));
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
+ auto charBytes =
+ builder.getKindMap().getCharacterBitsize(charTy.getFKind()) / 8;
+ mlir::Value bytes =
+ builder.createIntegerConstant(loc, idxTy, charBytes);
+ mlir::Value length = fir::getLen(exv);
+ if (!length)
+ fir::emitFatalError(loc, "result is not boxed character");
+ eleSz = builder.create<mlir::arith::MulIOp>(loc, bytes, length);
+ } else {
+ TODO(loc, "PDT size");
+ // Will call the PDT's size function with the type parameters.
+ }
+ }
+
+ // Compute the coordinate using `fir.coordinate_of`, or, if the type has
+ // dynamic size, generating the pointer arithmetic.
+ auto computeCoordinate = [&](mlir::Value buff, mlir::Value off) {
+ mlir::Type refTy = eleRefTy;
+ if (fir::hasDynamicSize(eleTy)) {
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
+ // Scale a simple pointer using dynamic length and offset values.
+ auto chTy = fir::CharacterType::getSingleton(charTy.getContext(),
+ charTy.getFKind());
+ refTy = builder.getRefType(chTy);
+ mlir::Type toTy = builder.getRefType(builder.getVarLenSeqTy(chTy));
+ buff = builder.createConvert(loc, toTy, buff);
+ off = builder.create<mlir::arith::MulIOp>(loc, off, eleSz);
+ } else {
+ TODO(loc, "PDT offset");
}
- return arrayLoadExtValue(builder, loc, load, iters.iterVec(), newBase);
}
- auto fetch = builder.create<fir::ArrayFetchOp>(
- loc, eleTy, load, iters.iterVec(), load.getTypeparams());
- return arrayLoadExtValue(builder, loc, load, iters.iterVec(), fetch);
+ auto coor = builder.create<fir::CoordinateOp>(loc, refTy, buff,
+ mlir::ValueRange{off});
+ return builder.createConvert(loc, eleRefTy, coor);
};
- return [=](IterSpace iters) mutable {
- auto newIters = pc(iters);
- return lambda(newIters);
+
+ // Lambda to lower an abstract array box value.
+ auto doAbstractArray = [&](const auto &v) {
+ // Compute the array size.
+ mlir::Value arrSz = one;
+ for (auto ext : v.getExtents())
+ arrSz = builder.create<mlir::arith::MulIOp>(loc, arrSz, ext);
+
+ // Grow the buffer as needed.
+ auto endOff = builder.create<mlir::arith::AddIOp>(loc, off, arrSz);
+ mem = growBuffer(mem, endOff, limit, buffSize, eleSz);
+
+ // Copy the elements to the buffer.
+ mlir::Value byteSz =
+ builder.create<mlir::arith::MulIOp>(loc, arrSz, eleSz);
+ auto buff = builder.createConvert(loc, fir::HeapType::get(resTy), mem);
+ mlir::Value buffi = computeCoordinate(buff, off);
+ llvm::SmallVector<mlir::Value> args = fir::runtime::createArguments(
+ builder, loc, memcpyType(), buffi, v.getAddr(), byteSz,
+ /*volatile=*/builder.createBool(loc, false));
+ createCallMemcpy(args);
+
+ // Save the incremented buffer position.
+ builder.create<fir::StoreOp>(loc, endOff, buffPos);
+ };
+
+ // Copy a trivial scalar value into the buffer.
+ auto doTrivialScalar = [&](const ExtValue &v, mlir::Value len = {}) {
+ // Increment the buffer position.
+ auto plusOne = builder.create<mlir::arith::AddIOp>(loc, off, one);
+
+ // Grow the buffer as needed.
+ mem = growBuffer(mem, plusOne, limit, buffSize, eleSz);
+
+ // Store the element in the buffer.
+ mlir::Value buff =
+ builder.createConvert(loc, fir::HeapType::get(resTy), mem);
+ auto buffi = builder.create<fir::CoordinateOp>(loc, eleRefTy, buff,
+ mlir::ValueRange{off});
+ fir::factory::genScalarAssignment(
+ builder, loc,
+ [&]() -> ExtValue {
+ if (len)
+ return fir::CharBoxValue(buffi, len);
+ return buffi;
+ }(),
+ v);
+ builder.create<fir::StoreOp>(loc, plusOne, buffPos);
};
+
+ // Copy the value.
+ exv.match(
+ [&](mlir::Value) { doTrivialScalar(exv); },
+ [&](const fir::CharBoxValue &v) {
+ auto buffer = v.getBuffer();
+ if (fir::isa_char(buffer.getType())) {
+ doTrivialScalar(exv, eleSz);
+ } else {
+ // Increment the buffer position.
+ auto plusOne = builder.create<mlir::arith::AddIOp>(loc, off, one);
+
+ // Grow the buffer as needed.
+ mem = growBuffer(mem, plusOne, limit, buffSize, eleSz);
+
+ // Store the element in the buffer.
+ mlir::Value buff =
+ builder.createConvert(loc, fir::HeapType::get(resTy), mem);
+ mlir::Value buffi = computeCoordinate(buff, off);
+ llvm::SmallVector<mlir::Value> args = fir::runtime::createArguments(
+ builder, loc, memcpyType(), buffi, v.getAddr(), eleSz,
+ /*volatile=*/builder.createBool(loc, false));
+ createCallMemcpy(args);
+
+ builder.create<fir::StoreOp>(loc, plusOne, buffPos);
+ }
+ },
+ [&](const fir::ArrayBoxValue &v) { doAbstractArray(v); },
+ [&](const fir::CharArrayBoxValue &v) { doAbstractArray(v); },
+ [&](const auto &) {
+ TODO(loc, "unhandled array constructor expression");
+ });
+ return mem;
}
+ // Lower the expr cases in an ac-value-list.
template <typename A>
- CC genImplicitArrayAccess(const A &x, ComponentPath &components) {
- components.reversePath.push_back(ImplicitSubscripts{});
- ExtValue exv = asScalarRef(x);
- lowerPath(exv, components);
- auto lambda = genarr(exv, components);
- return [=](IterSpace iters) { return lambda(components.pc(iters)); };
- }
- CC genImplicitArrayAccess(const Fortran::evaluate::NamedEntity &x,
- ComponentPath &components) {
- if (x.IsSymbol())
- return genImplicitArrayAccess(x.GetFirstSymbol(), components);
- return genImplicitArrayAccess(x.GetComponent(), components);
+ std::pair<ExtValue, bool>
+ genArrayCtorInitializer(const Fortran::evaluate::Expr<A> &x, mlir::Type,
+ mlir::Value, mlir::Value, mlir::Value,
+ Fortran::lower::StatementContext &stmtCtx) {
+ if (isArray(x))
+ return {lowerNewArrayExpression(converter, symMap, stmtCtx, toEvExpr(x)),
+ /*needCopy=*/true};
+ return {asScalar(x), /*needCopy=*/true};
}
+ // Lower an ac-implied-do in an ac-value-list.
template <typename A>
- CC genAsScalar(const A &x) {
+ std::pair<ExtValue, bool>
+ genArrayCtorInitializer(const Fortran::evaluate::ImpliedDo<A> &x,
+ mlir::Type resTy, mlir::Value mem,
+ mlir::Value buffPos, mlir::Value buffSize,
+ Fortran::lower::StatementContext &) {
mlir::Location loc = getLoc();
- if (isProjectedCopyInCopyOut()) {
- return [=, &x, builder = &converter.getFirOpBuilder()](
- IterSpace iters) -> ExtValue {
- ExtValue exv = asScalarRef(x);
- mlir::Value val = fir::getBase(exv);
- mlir::Type eleTy = fir::unwrapRefType(val.getType());
- if (isAdjustedArrayElementType(eleTy)) {
- if (fir::isa_char(eleTy)) {
- TODO(getLoc(), "assignment of character type");
- } else if (fir::isa_derived(eleTy)) {
- TODO(loc, "assignment of derived type");
- } else {
- fir::emitFatalError(loc, "array type not expected in scalar");
- }
- } else {
- builder->create<fir::StoreOp>(loc, iters.getElement(), val);
- }
- return exv;
- };
- }
- return [=, &x](IterSpace) { return asScalar(x); };
- }
+ mlir::IndexType idxTy = builder.getIndexType();
+ mlir::Value lo =
+ builder.createConvert(loc, idxTy, fir::getBase(asScalar(x.lower())));
+ mlir::Value up =
+ builder.createConvert(loc, idxTy, fir::getBase(asScalar(x.upper())));
+ mlir::Value step =
+ builder.createConvert(loc, idxTy, fir::getBase(asScalar(x.stride())));
+ auto seqTy = resTy.template cast<fir::SequenceType>();
+ mlir::Type eleTy = fir::unwrapSequenceType(seqTy);
+ auto loop =
+ builder.create<fir::DoLoopOp>(loc, lo, up, step, /*unordered=*/false,
+ /*finalCount=*/false, mem);
+    // Create a new binding from x.name(), the ac-do-variable, to the
+    // iteration value.
+ symMap.pushImpliedDoBinding(toStringRef(x.name()), loop.getInductionVar());
+ auto insPt = builder.saveInsertionPoint();
+ builder.setInsertionPointToStart(loop.getBody());
+ // Thread mem inside the loop via loop argument.
+ mem = loop.getRegionIterArgs()[0];
- CC genarr(const Fortran::semantics::Symbol &x, ComponentPath &components) {
- if (explicitSpaceIsActive()) {
- if (x.Rank() > 0)
- components.reversePath.push_back(ImplicitSubscripts{});
- if (fir::ArrayLoadOp load = explicitSpace->findBinding(&x))
- return applyPathToArrayLoad(load, components);
- } else {
- return genImplicitArrayAccess(x, components);
+ mlir::Type eleRefTy = builder.getRefType(eleTy);
+
+ // Any temps created in the loop body must be freed inside the loop body.
+ stmtCtx.pushScope();
+ llvm::Optional<mlir::Value> charLen;
+ for (const Fortran::evaluate::ArrayConstructorValue<A> &acv : x.values()) {
+ auto [exv, copyNeeded] = std::visit(
+ [&](const auto &v) {
+ return genArrayCtorInitializer(v, resTy, mem, buffPos, buffSize,
+ stmtCtx);
+ },
+ acv.u);
+ mlir::Value eleSz = computeElementSize(exv, eleTy, resTy);
+ mem = copyNeeded ? copyNextArrayCtorSection(exv, buffPos, buffSize, mem,
+ eleSz, eleTy, eleRefTy, resTy)
+ : fir::getBase(exv);
+ if (fir::isa_char(seqTy.getEleTy()) && !charLen.hasValue()) {
+ charLen = builder.createTemporary(loc, builder.getI64Type());
+ mlir::Value castLen =
+ builder.createConvert(loc, builder.getI64Type(), fir::getLen(exv));
+ builder.create<fir::StoreOp>(loc, castLen, charLen.getValue());
+ }
}
- if (pathIsEmpty(components))
- return genAsScalar(x);
- mlir::Location loc = getLoc();
- return [=](IterSpace) -> ExtValue {
- fir::emitFatalError(loc, "reached symbol with path");
- };
- }
+ stmtCtx.finalize(/*popScope=*/true);
- /// Lower a component path with or without rank.
- /// Example: <code>array%baz%qux%waldo</code>
- CC genarr(const Fortran::evaluate::Component &x, ComponentPath &components) {
- if (explicitSpaceIsActive()) {
- if (x.base().Rank() == 0 && x.Rank() > 0)
- components.reversePath.push_back(ImplicitSubscripts{});
- if (fir::ArrayLoadOp load = explicitSpace->findBinding(&x))
- return applyPathToArrayLoad(load, components);
- } else {
- if (x.base().Rank() == 0)
- return genImplicitArrayAccess(x, components);
+ builder.create<fir::ResultOp>(loc, mem);
+ builder.restoreInsertionPoint(insPt);
+ mem = loop.getResult(0);
+ symMap.popImpliedDoBinding();
+ llvm::SmallVector<mlir::Value> extents = {
+ builder.create<fir::LoadOp>(loc, buffPos).getResult()};
+
+ // Convert to extended value.
+ if (fir::isa_char(seqTy.getEleTy())) {
+ auto len = builder.create<fir::LoadOp>(loc, charLen.getValue());
+ return {fir::CharArrayBoxValue{mem, len, extents}, /*needCopy=*/false};
}
- bool atEnd = pathIsEmpty(components);
- if (!getLastSym(x).test(Fortran::semantics::Symbol::Flag::ParentComp))
- // Skip parent components; their components are placed directly in the
- // object.
- components.reversePath.push_back(&x);
- auto result = genarr(x.base(), components);
- if (components.applied)
- return result;
- if (atEnd)
- return genAsScalar(x);
- mlir::Location loc = getLoc();
- return [=](IterSpace) -> ExtValue {
- fir::emitFatalError(loc, "reached component with path");
- };
+ return {fir::ArrayBoxValue{mem, extents}, /*needCopy=*/false};
}
- /// Array reference with subscripts. If this has rank > 0, this is a form
- /// of an array section (slice).
- ///
- /// There are two "slicing" primitives that may be applied on a dimension by
- /// dimension basis: (1) triple notation and (2) vector addressing. Since
- /// dimensions can be selectively sliced, some dimensions may contain
- /// regular scalar expressions and those dimensions do not participate in
- /// the array expression evaluation.
- CC genarr(const Fortran::evaluate::ArrayRef &x, ComponentPath &components) {
- if (explicitSpaceIsActive()) {
- if (Fortran::lower::isRankedArrayAccess(x))
- components.reversePath.push_back(ImplicitSubscripts{});
- if (fir::ArrayLoadOp load = explicitSpace->findBinding(&x)) {
- components.reversePath.push_back(&x);
- return applyPathToArrayLoad(load, components);
+ // To simplify the handling and interaction between the various cases, array
+ // constructors are always lowered to the incremental construction code
+ // pattern, even if the extent of the array value is constant. After the
+ // MemToReg pass and constant folding, the optimizer should be able to
+ // determine that all the buffer overrun tests are false when the
+ // incremental construction wasn't actually required.
+ template <typename A>
+ CC genarr(const Fortran::evaluate::ArrayConstructor<A> &x) {
+ mlir::Location loc = getLoc();
+ auto evExpr = toEvExpr(x);
+ mlir::Type resTy = translateSomeExprToFIRType(converter, evExpr);
+ mlir::IndexType idxTy = builder.getIndexType();
+ auto seqTy = resTy.template cast<fir::SequenceType>();
+ mlir::Type eleTy = fir::unwrapSequenceType(resTy);
+ mlir::Value buffSize = builder.createTemporary(loc, idxTy, ".buff.size");
+ mlir::Value zero = builder.createIntegerConstant(loc, idxTy, 0);
+ mlir::Value buffPos = builder.createTemporary(loc, idxTy, ".buff.pos");
+ builder.create<fir::StoreOp>(loc, zero, buffPos);
+ // Allocate space for the array to be constructed.
+ mlir::Value mem;
+ if (fir::hasDynamicSize(resTy)) {
+ if (fir::hasDynamicSize(eleTy)) {
+ // The size of each element may depend on a general expression. Defer
+ // creating the buffer until after the expression is evaluated.
+ mem = builder.createNullConstant(loc, builder.getRefType(eleTy));
+ builder.create<fir::StoreOp>(loc, zero, buffSize);
+ } else {
+ mlir::Value initBuffSz =
+ builder.createIntegerConstant(loc, idxTy, clInitialBufferSize);
+ mem = builder.create<fir::AllocMemOp>(
+ loc, eleTy, /*typeparams=*/llvm::None, initBuffSz);
+ builder.create<fir::StoreOp>(loc, initBuffSz, buffSize);
}
} else {
- if (Fortran::lower::isRankedArrayAccess(x)) {
- components.reversePath.push_back(&x);
- return genImplicitArrayAccess(x.base(), components);
+ mem = builder.create<fir::AllocMemOp>(loc, resTy);
+ int64_t buffSz = 1;
+ for (auto extent : seqTy.getShape())
+ buffSz *= extent;
+ mlir::Value initBuffSz =
+ builder.createIntegerConstant(loc, idxTy, buffSz);
+ builder.create<fir::StoreOp>(loc, initBuffSz, buffSize);
+ }
+ // Compute size of element
+ mlir::Type eleRefTy = builder.getRefType(eleTy);
+
+ // Populate the buffer with the elements, growing as necessary.
+ llvm::Optional<mlir::Value> charLen;
+ for (const auto &expr : x) {
+ auto [exv, copyNeeded] = std::visit(
+ [&](const auto &e) {
+ return genArrayCtorInitializer(e, resTy, mem, buffPos, buffSize,
+ stmtCtx);
+ },
+ expr.u);
+ mlir::Value eleSz = computeElementSize(exv, eleTy, resTy);
+ mem = copyNeeded ? copyNextArrayCtorSection(exv, buffPos, buffSize, mem,
+ eleSz, eleTy, eleRefTy, resTy)
+ : fir::getBase(exv);
+ if (fir::isa_char(seqTy.getEleTy()) && !charLen.hasValue()) {
+ charLen = builder.createTemporary(loc, builder.getI64Type());
+ mlir::Value castLen =
+ builder.createConvert(loc, builder.getI64Type(), fir::getLen(exv));
+ builder.create<fir::StoreOp>(loc, castLen, charLen.getValue());
}
}
- bool atEnd = pathIsEmpty(components);
- components.reversePath.push_back(&x);
- auto result = genarr(x.base(), components);
- if (components.applied)
- return result;
- mlir::Location loc = getLoc();
- if (atEnd) {
- if (x.Rank() == 0)
- return genAsScalar(x);
- fir::emitFatalError(loc, "expected scalar");
+ mem = builder.createConvert(loc, fir::HeapType::get(resTy), mem);
+ llvm::SmallVector<mlir::Value> extents = {
+ builder.create<fir::LoadOp>(loc, buffPos)};
+
+ // Cleanup the temporary.
+ fir::FirOpBuilder *bldr = &converter.getFirOpBuilder();
+ stmtCtx.attachCleanup(
+ [bldr, loc, mem]() { bldr->create<fir::FreeMemOp>(loc, mem); });
+
+ // Return the continuation.
+ if (fir::isa_char(seqTy.getEleTy())) {
+ if (charLen.hasValue()) {
+ auto len = builder.create<fir::LoadOp>(loc, charLen.getValue());
+ return genarr(fir::CharArrayBoxValue{mem, len, extents});
+ }
+ return genarr(fir::CharArrayBoxValue{mem, zero, extents});
}
- return [=](IterSpace) -> ExtValue {
- fir::emitFatalError(loc, "reached arrayref with path");
- };
+ return genarr(fir::ArrayBoxValue{mem, extents});
}
- CC genarr(const Fortran::evaluate::CoarrayRef &x, ComponentPath &components) {
- TODO(getLoc(), "coarray reference");
+ CC genarr(const Fortran::evaluate::ImpliedDoIndex &) {
+ fir::emitFatalError(getLoc(), "implied do index cannot have rank > 0");
}
-
- CC genarr(const Fortran::evaluate::NamedEntity &x,
- ComponentPath &components) {
- return x.IsSymbol() ? genarr(x.GetFirstSymbol(), components)
- : genarr(x.GetComponent(), components);
+ CC genarr(const Fortran::evaluate::TypeParamInquiry &x) {
+ TODO(getLoc(), "array expr type parameter inquiry");
+ return [](IterSpace iters) -> ExtValue { return mlir::Value{}; };
}
-
- CC genarr(const Fortran::evaluate::DataRef &x, ComponentPath &components) {
- return std::visit([&](const auto &v) { return genarr(v, components); },
- x.u);
+ CC genarr(const Fortran::evaluate::DescriptorInquiry &x) {
+ TODO(getLoc(), "array expr descriptor inquiry");
+ return [](IterSpace iters) -> ExtValue { return mlir::Value{}; };
}
-
- bool pathIsEmpty(const ComponentPath &components) {
- return components.reversePath.empty();
+ CC genarr(const Fortran::evaluate::StructureConstructor &x) {
+ TODO(getLoc(), "structure constructor");
+ return [](IterSpace iters) -> ExtValue { return mlir::Value{}; };
}
- /// Given an optional fir.box, returns an fir.box that is the original one if
- /// it is present and it otherwise an unallocated box.
- /// Absent fir.box are implemented as a null pointer descriptor. Generated
- /// code may need to unconditionally read a fir.box that can be absent.
- /// This helper allows creating a fir.box that can be read in all cases
- /// outside of a fir.if (isPresent) region. However, the usages of the value
- /// read from such box should still only be done in a fir.if(isPresent).
- static fir::ExtendedValue
- absentBoxToUnalllocatedBox(fir::FirOpBuilder &builder, mlir::Location loc,
- const fir::ExtendedValue &exv,
- mlir::Value isPresent) {
- mlir::Value box = fir::getBase(exv);
- mlir::Type boxType = box.getType();
- assert(boxType.isa<fir::BoxType>() && "argument must be a fir.box");
- mlir::Value emptyBox =
- fir::factory::createUnallocatedBox(builder, loc, boxType, llvm::None);
- auto safeToReadBox =
- builder.create<mlir::arith::SelectOp>(loc, isPresent, box, emptyBox);
- return fir::substBase(exv, safeToReadBox);
- }
+ //===--------------------------------------------------------------------===//
+  // LOGICAL operators (.NOT., .AND., .EQV., etc.)
+ //===--------------------------------------------------------------------===//
- std::tuple<CC, mlir::Value, mlir::Type>
- genOptionalArrayFetch(const Fortran::lower::SomeExpr &expr) {
- assert(expr.Rank() > 0 && "expr must be an array");
+ template <int KIND>
+ CC genarr(const Fortran::evaluate::Not<KIND> &x) {
mlir::Location loc = getLoc();
- ExtValue optionalArg = asInquired(expr);
- mlir::Value isPresent = genActualIsPresentTest(builder, loc, optionalArg);
- // Generate an array load and access to an array that may be an absent
- // optional or an unallocated optional.
- mlir::Value base = getBase(optionalArg);
- const bool hasOptionalAttr =
- fir::valueHasFirAttribute(base, fir::getOptionalAttrName());
- mlir::Type baseType = fir::unwrapRefType(base.getType());
- const bool isBox = baseType.isa<fir::BoxType>();
- const bool isAllocOrPtr = Fortran::evaluate::IsAllocatableOrPointerObject(
- expr, converter.getFoldingContext());
- mlir::Type arrType = fir::unwrapPassByRefType(baseType);
- mlir::Type eleType = fir::unwrapSequenceType(arrType);
- ExtValue exv = optionalArg;
- if (hasOptionalAttr && isBox && !isAllocOrPtr) {
- // Elemental argument cannot be allocatable or pointers (C15100).
- // Hence, per 15.5.2.12 3 (8) and (9), the provided Allocatable and
- // Pointer optional arrays cannot be absent. The only kind of entities
- // that can get here are optional assumed shape and polymorphic entities.
- exv = absentBoxToUnalllocatedBox(builder, loc, exv, isPresent);
- }
- // All the properties can be read from any fir.box but the read values may
- // be undefined and should only be used inside a fir.if (canBeRead) region.
- if (const auto *mutableBox = exv.getBoxOf<fir::MutableBoxValue>())
- exv = fir::factory::genMutableBoxRead(builder, loc, *mutableBox);
-
- mlir::Value memref = fir::getBase(exv);
- mlir::Value shape = builder.createShape(loc, exv);
- mlir::Value noSlice;
- auto arrLoad = builder.create<fir::ArrayLoadOp>(
- loc, arrType, memref, shape, noSlice, fir::getTypeParams(exv));
- mlir::Operation::operand_range arrLdTypeParams = arrLoad.getTypeparams();
- mlir::Value arrLd = arrLoad.getResult();
- // Mark the load to tell later passes it is unsafe to use this array_load
- // shape unconditionally.
- arrLoad->setAttr(fir::getOptionalAttrName(), builder.getUnitAttr());
-
- // Place the array as optional on the arrayOperands stack so that its
- // shape will only be used as a fallback to induce the implicit loop nest
- // (that is if there is no non optional array arguments).
- arrayOperands.push_back(
- ArrayOperand{memref, shape, noSlice, /*mayBeAbsent=*/true});
-
- // By value semantics.
- auto cc = [=](IterSpace iters) -> ExtValue {
- auto arrFetch = builder.create<fir::ArrayFetchOp>(
- loc, eleType, arrLd, iters.iterVec(), arrLdTypeParams);
- return fir::factory::arraySectionElementToExtendedValue(
- builder, loc, exv, arrFetch, noSlice);
+ mlir::IntegerType i1Ty = builder.getI1Type();
+ auto lambda = genarr(x.left());
+ mlir::Value truth = builder.createBool(loc, true);
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value logical = fir::getBase(lambda(iters));
+ mlir::Value val = builder.createConvert(loc, i1Ty, logical);
+ return builder.create<mlir::arith::XOrIOp>(loc, val, truth);
};
- return {cc, isPresent, eleType};
+ }
+ template <typename OP, typename A>
+ CC createBinaryBoolOp(const A &x) {
+ mlir::Location loc = getLoc();
+ mlir::IntegerType i1Ty = builder.getI1Type();
+ auto lf = genarr(x.left());
+ auto rf = genarr(x.right());
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value left = fir::getBase(lf(iters));
+ mlir::Value right = fir::getBase(rf(iters));
+ mlir::Value lhs = builder.createConvert(loc, i1Ty, left);
+ mlir::Value rhs = builder.createConvert(loc, i1Ty, right);
+ return builder.create<OP>(loc, lhs, rhs);
+ };
+ }
+ template <typename OP, typename A>
+ CC createCompareBoolOp(mlir::arith::CmpIPredicate pred, const A &x) {
+ mlir::Location loc = getLoc();
+ mlir::IntegerType i1Ty = builder.getI1Type();
+ auto lf = genarr(x.left());
+ auto rf = genarr(x.right());
+ return [=](IterSpace iters) -> ExtValue {
+ mlir::Value left = fir::getBase(lf(iters));
+ mlir::Value right = fir::getBase(rf(iters));
+ mlir::Value lhs = builder.createConvert(loc, i1Ty, left);
+ mlir::Value rhs = builder.createConvert(loc, i1Ty, right);
+ return builder.create<OP>(loc, pred, lhs, rhs);
+ };
+ }
+ template <int KIND>
+ CC genarr(const Fortran::evaluate::LogicalOperation<KIND> &x) {
+ switch (x.logicalOperator) {
+ case Fortran::evaluate::LogicalOperator::And:
+ return createBinaryBoolOp<mlir::arith::AndIOp>(x);
+ case Fortran::evaluate::LogicalOperator::Or:
+ return createBinaryBoolOp<mlir::arith::OrIOp>(x);
+ case Fortran::evaluate::LogicalOperator::Eqv:
+ return createCompareBoolOp<mlir::arith::CmpIOp>(
+ mlir::arith::CmpIPredicate::eq, x);
+ case Fortran::evaluate::LogicalOperator::Neqv:
+ return createCompareBoolOp<mlir::arith::CmpIOp>(
+ mlir::arith::CmpIPredicate::ne, x);
+ case Fortran::evaluate::LogicalOperator::Not:
+ llvm_unreachable(".NOT. handled elsewhere");
+ }
+ llvm_unreachable("unhandled case");
}
- /// Generate a continuation to pass \p expr to an OPTIONAL argument of an
- /// elemental procedure. This is meant to handle the cases where \p expr might
- /// be dynamically absent (i.e. when it is a POINTER, an ALLOCATABLE or an
- /// OPTIONAL variable). If p\ expr is guaranteed to be present genarr() can
- /// directly be called instead.
- CC genarrForwardOptionalArgumentToCall(const Fortran::lower::SomeExpr &expr) {
- mlir::Location loc = getLoc();
- // Only by-value numerical and logical so far.
- if (semant != ConstituentSemantics::RefTransparent)
- TODO(loc, "optional arguments in user defined elemental procedures");
-
- // Handle scalar argument case (the if-then-else is generated outside of the
- // implicit loop nest).
- if (expr.Rank() == 0) {
- ExtValue optionalArg = asInquired(expr);
- mlir::Value isPresent = genActualIsPresentTest(builder, loc, optionalArg);
- mlir::Value elementValue =
- fir::getBase(genOptionalValue(builder, loc, optionalArg, isPresent));
- return [=](IterSpace iters) -> ExtValue { return elementValue; };
- }
+ //===--------------------------------------------------------------------===//
+ // Relational operators (<, <=, ==, etc.)
+ //===--------------------------------------------------------------------===//
- CC cc;
- mlir::Value isPresent;
- mlir::Type eleType;
- std::tie(cc, isPresent, eleType) = genOptionalArrayFetch(expr);
+ template <typename OP, typename PRED, typename A>
+ CC createCompareOp(PRED pred, const A &x) {
+ mlir::Location loc = getLoc();
+ auto lf = genarr(x.left());
+ auto rf = genarr(x.right());
return [=](IterSpace iters) -> ExtValue {
- mlir::Value elementValue =
- builder
- .genIfOp(loc, {eleType}, isPresent,
- /*withElseRegion=*/true)
- .genThen([&]() {
- builder.create<fir::ResultOp>(loc, fir::getBase(cc(iters)));
- })
- .genElse([&]() {
- mlir::Value zero =
- fir::factory::createZeroValue(builder, loc, eleType);
- builder.create<fir::ResultOp>(loc, zero);
- })
- .getResults()[0];
- return elementValue;
+ mlir::Value lhs = fir::getBase(lf(iters));
+ mlir::Value rhs = fir::getBase(rf(iters));
+ return builder.create<OP>(loc, pred, lhs, rhs);
};
}
-
- /// Reduce the rank of a array to be boxed based on the slice's operands.
- static mlir::Type reduceRank(mlir::Type arrTy, mlir::Value slice) {
- if (slice) {
- auto slOp = mlir::dyn_cast<fir::SliceOp>(slice.getDefiningOp());
- assert(slOp && "expected slice op");
- auto seqTy = arrTy.dyn_cast<fir::SequenceType>();
- assert(seqTy && "expected array type");
- mlir::Operation::operand_range triples = slOp.getTriples();
- fir::SequenceType::Shape shape;
- // reduce the rank for each invariant dimension
- for (unsigned i = 1, end = triples.size(); i < end; i += 3)
- if (!mlir::isa_and_nonnull<fir::UndefOp>(triples[i].getDefiningOp()))
- shape.push_back(fir::SequenceType::getUnknownExtent());
- return fir::SequenceType::get(shape, seqTy.getEleTy());
- }
- // not sliced, so no change in rank
- return arrTy;
+ template <typename A>
+ CC createCompareCharOp(mlir::arith::CmpIPredicate pred, const A &x) {
+ mlir::Location loc = getLoc();
+ auto lf = genarr(x.left());
+ auto rf = genarr(x.right());
+ return [=](IterSpace iters) -> ExtValue {
+ auto lhs = lf(iters);
+ auto rhs = rf(iters);
+ return fir::runtime::genCharCompare(builder, loc, pred, lhs, rhs);
+ };
}
-
- CC genarr(const Fortran::evaluate::ComplexPart &x,
- ComponentPath &components) {
- components.reversePath.push_back(&x);
- return genarr(x.complex(), components);
+ template <int KIND>
+ CC genarr(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
+ Fortran::common::TypeCategory::Integer, KIND>> &x) {
+ return createCompareOp<mlir::arith::CmpIOp>(translateRelational(x.opr), x);
+ }
+ template <int KIND>
+ CC genarr(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
+ Fortran::common::TypeCategory::Character, KIND>> &x) {
+ return createCompareCharOp(translateRelational(x.opr), x);
+ }
+ template <int KIND>
+ CC genarr(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
+ Fortran::common::TypeCategory::Real, KIND>> &x) {
+ return createCompareOp<mlir::arith::CmpFOp>(translateFloatRelational(x.opr),
+ x);
+ }
+ template <int KIND>
+ CC genarr(const Fortran::evaluate::Relational<Fortran::evaluate::Type<
+ Fortran::common::TypeCategory::Complex, KIND>> &x) {
+ return createCompareOp<fir::CmpcOp>(translateFloatRelational(x.opr), x);
+ }
+ CC genarr(
+ const Fortran::evaluate::Relational<Fortran::evaluate::SomeType> &r) {
+ return std::visit([&](const auto &x) { return genarr(x); }, r.u);
}
- CC genarr(const Fortran::evaluate::StaticDataObject::Pointer &,
- ComponentPath &components) {
- TODO(getLoc(), "genarr StaticDataObject::Pointer");
+ template <typename A>
+ CC genarr(const Fortran::evaluate::Designator<A> &des) {
+ ComponentPath components(des.Rank() > 0);
+ return std::visit([&](const auto &x) { return genarr(x, components); },
+ des.u);
}
- /// Substrings (see 9.4.1)
- CC genarr(const Fortran::evaluate::Substring &x, ComponentPath &components) {
- components.substring = &x;
- return std::visit([&](const auto &v) { return genarr(v, components); },
- x.parent());
+ /// Is the path component rank > 0?
+ static bool ranked(const PathComponent &x) {
+ return std::visit(Fortran::common::visitors{
+ [](const ImplicitSubscripts &) { return false; },
+ [](const auto *v) { return v->Rank() > 0; }},
+ x);
}
- /// Base case of generating an array reference,
- CC genarr(const ExtValue &extMemref, ComponentPath &components) {
- mlir::Location loc = getLoc();
- mlir::Value memref = fir::getBase(extMemref);
- mlir::Type arrTy = fir::dyn_cast_ptrOrBoxEleTy(memref.getType());
- assert(arrTy.isa<fir::SequenceType>() && "memory ref must be an array");
- mlir::Value shape = builder.createShape(loc, extMemref);
- mlir::Value slice;
- if (components.isSlice()) {
- if (isBoxValue() && components.substring) {
- // Append the substring operator to emboxing Op as it will become an
- // interior adjustment (add offset, adjust LEN) to the CHARACTER value
- // being referenced in the descriptor.
- llvm::SmallVector<mlir::Value> substringBounds;
- populateBounds(substringBounds, components.substring);
- // Convert to (offset, size)
- mlir::Type iTy = substringBounds[0].getType();
- if (substringBounds.size() != 2) {
- fir::CharacterType charTy =
- fir::factory::CharacterExprHelper::getCharType(arrTy);
- if (charTy.hasConstantLen()) {
- mlir::IndexType idxTy = builder.getIndexType();
- fir::CharacterType::LenType charLen = charTy.getLen();
- mlir::Value lenValue =
- builder.createIntegerConstant(loc, idxTy, charLen);
- substringBounds.push_back(lenValue);
- } else {
- llvm::SmallVector<mlir::Value> typeparams =
- fir::getTypeParams(extMemref);
- substringBounds.push_back(typeparams.back());
- }
- }
- // Convert the lower bound to 0-based substring.
- mlir::Value one =
- builder.createIntegerConstant(loc, substringBounds[0].getType(), 1);
- substringBounds[0] =
- builder.create<mlir::arith::SubIOp>(loc, substringBounds[0], one);
- // Convert the upper bound to a length.
- mlir::Value cast = builder.createConvert(loc, iTy, substringBounds[1]);
- mlir::Value zero = builder.createIntegerConstant(loc, iTy, 0);
- auto size =
- builder.create<mlir::arith::SubIOp>(loc, cast, substringBounds[0]);
- auto cmp = builder.create<mlir::arith::CmpIOp>(
- loc, mlir::arith::CmpIPredicate::sgt, size, zero);
- // size = MAX(upper - (lower - 1), 0)
- substringBounds[1] =
- builder.create<mlir::arith::SelectOp>(loc, cmp, size, zero);
- slice = builder.create<fir::SliceOp>(loc, components.trips,
- components.suffixComponents,
- substringBounds);
- } else {
- slice = builder.createSlice(loc, extMemref, components.trips,
- components.suffixComponents);
- }
- if (components.hasComponents()) {
- auto seqTy = arrTy.cast<fir::SequenceType>();
- mlir::Type eleTy =
- fir::applyPathToType(seqTy.getEleTy(), components.suffixComponents);
- if (!eleTy)
- fir::emitFatalError(loc, "slicing path is ill-formed");
- if (auto realTy = eleTy.dyn_cast<fir::RealType>())
- eleTy = Fortran::lower::convertReal(realTy.getContext(),
- realTy.getFKind());
+ //===-------------------------------------------------------------------===//
+ // Array data references in an explicit iteration space.
+ //
+ // Use the base array that was loaded before the loop nest.
+ //===-------------------------------------------------------------------===//
- // create the type of the projected array.
- arrTy = fir::SequenceType::get(seqTy.getShape(), eleTy);
- LLVM_DEBUG(llvm::dbgs()
- << "type of array projection from component slicing: "
- << eleTy << ", " << arrTy << '\n');
- }
- }
- arrayOperands.push_back(ArrayOperand{memref, shape, slice});
- if (destShape.empty())
- destShape = getShape(arrayOperands.back());
- if (isBoxValue()) {
- // Semantics are a reference to a boxed array.
- // This case just requires that an embox operation be created to box the
- // value. The value of the box is forwarded in the continuation.
- mlir::Type reduceTy = reduceRank(arrTy, slice);
- auto boxTy = fir::BoxType::get(reduceTy);
- if (components.substring) {
- // Adjust char length to substring size.
- fir::CharacterType charTy =
- fir::factory::CharacterExprHelper::getCharType(reduceTy);
- auto seqTy = reduceTy.cast<fir::SequenceType>();
- // TODO: Use a constant for fir.char LEN if we can compute it.
- boxTy = fir::BoxType::get(
- fir::SequenceType::get(fir::CharacterType::getUnknownLen(
- builder.getContext(), charTy.getFKind()),
- seqTy.getDimension()));
- }
- mlir::Value embox =
- memref.getType().isa<fir::BoxType>()
- ? builder.create<fir::ReboxOp>(loc, boxTy, memref, shape, slice)
- .getResult()
- : builder
- .create<fir::EmboxOp>(loc, boxTy, memref, shape, slice,
- fir::getTypeParams(extMemref))
- .getResult();
- return [=](IterSpace) -> ExtValue { return fir::BoxValue(embox); };
+ /// Lower the path (`revPath`, in reverse) to be appended to an array_fetch or
+ /// array_update op. \p ty is the initial type of the array
+ /// (reference). Returns the type of the element after application of the
+ /// path in \p components.
+ ///
+  /// TODO: This needs to deal with arrays with initial bounds other than 1.
+ /// TODO: Thread type parameters correctly.
+ mlir::Type lowerPath(const ExtValue &arrayExv, ComponentPath &components) {
+ mlir::Location loc = getLoc();
+ mlir::Type ty = fir::getBase(arrayExv).getType();
+ auto &revPath = components.reversePath;
+ ty = fir::unwrapPassByRefType(ty);
+ bool prefix = true;
+ auto addComponent = [&](mlir::Value v) {
+ if (prefix)
+ components.prefixComponents.push_back(v);
+ else
+ components.suffixComponents.push_back(v);
+ };
+ mlir::IndexType idxTy = builder.getIndexType();
+ mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
+ bool atBase = true;
+ auto saveSemant = semant;
+ if (isProjectedCopyInCopyOut())
+ semant = ConstituentSemantics::RefTransparent;
+ for (const auto &v : llvm::reverse(revPath)) {
+ std::visit(
+ Fortran::common::visitors{
+ [&](const ImplicitSubscripts &) {
+ prefix = false;
+ ty = fir::unwrapSequenceType(ty);
+ },
+ [&](const Fortran::evaluate::ComplexPart *x) {
+ assert(!prefix && "complex part must be at end");
+ mlir::Value offset = builder.createIntegerConstant(
+ loc, builder.getI32Type(),
+ x->part() == Fortran::evaluate::ComplexPart::Part::RE ? 0
+ : 1);
+ components.suffixComponents.push_back(offset);
+ ty = fir::applyPathToType(ty, mlir::ValueRange{offset});
+ },
+ [&](const Fortran::evaluate::ArrayRef *x) {
+ if (Fortran::lower::isRankedArrayAccess(*x)) {
+ genSliceIndices(components, arrayExv, *x, atBase);
+ } else {
+ // Array access where the expressions are scalar and cannot
+ // depend upon the implied iteration space.
+ unsigned ssIndex = 0u;
+ for (const auto &ss : x->subscript()) {
+ std::visit(
+ Fortran::common::visitors{
+ [&](const Fortran::evaluate::
+ IndirectSubscriptIntegerExpr &ie) {
+ const auto &e = ie.value();
+ if (isArray(e))
+ fir::emitFatalError(
+ loc,
+ "multiple components along single path "
+ "generating array subexpressions");
+ // Lower scalar index expression, append it to
+ // subs.
+ mlir::Value subscriptVal =
+ fir::getBase(asScalarArray(e));
+ // arrayExv is the base array. It needs to reflect
+ // the current array component instead.
+ // FIXME: must use lower bound of this component,
+ // not just the constant 1.
+ mlir::Value lb =
+ atBase ? fir::factory::readLowerBound(
+ builder, loc, arrayExv, ssIndex,
+ one)
+ : one;
+ mlir::Value val = builder.createConvert(
+ loc, idxTy, subscriptVal);
+ mlir::Value ivAdj =
+ builder.create<mlir::arith::SubIOp>(
+ loc, idxTy, val, lb);
+ addComponent(
+ builder.createConvert(loc, idxTy, ivAdj));
+ },
+ [&](const auto &) {
+ fir::emitFatalError(
+ loc, "multiple components along single path "
+ "generating array subexpressions");
+ }},
+ ss.u);
+ ssIndex++;
+ }
+ }
+ ty = fir::unwrapSequenceType(ty);
+ },
+ [&](const Fortran::evaluate::Component *x) {
+ auto fieldTy = fir::FieldType::get(builder.getContext());
+ llvm::StringRef name = toStringRef(getLastSym(*x).name());
+ auto recTy = ty.cast<fir::RecordType>();
+ ty = recTy.getType(name);
+ auto fld = builder.create<fir::FieldIndexOp>(
+ loc, fieldTy, name, recTy, fir::getTypeParams(arrayExv));
+ addComponent(fld);
+ }},
+ v);
+ atBase = false;
}
- auto eleTy = arrTy.cast<fir::SequenceType>().getEleTy();
- if (isReferentiallyOpaque()) {
- // Semantics are an opaque reference to an array.
- // This case forwards a continuation that will generate the address
- // arithmetic to the array element. This does not have copy-in/copy-out
- // semantics. No attempt to copy the array value will be made during the
- // interpretation of the Fortran statement.
- mlir::Type refEleTy = builder.getRefType(eleTy);
- return [=](IterSpace iters) -> ExtValue {
- // ArrayCoorOp does not expect zero based indices.
- llvm::SmallVector<mlir::Value> indices = fir::factory::originateIndices(
- loc, builder, memref.getType(), shape, iters.iterVec());
- mlir::Value coor = builder.create<fir::ArrayCoorOp>(
- loc, refEleTy, memref, shape, slice, indices,
- fir::getTypeParams(extMemref));
- if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
- llvm::SmallVector<mlir::Value> substringBounds;
- populateBounds(substringBounds, components.substring);
- if (!substringBounds.empty()) {
+ semant = saveSemant;
+ ty = fir::unwrapSequenceType(ty);
+ components.applied = true;
+ return ty;
+ }
+
+ llvm::SmallVector<mlir::Value> genSubstringBounds(ComponentPath &components) {
+ llvm::SmallVector<mlir::Value> result;
+ if (components.substring)
+ populateBounds(result, components.substring);
+ return result;
+ }
+
+ CC applyPathToArrayLoad(fir::ArrayLoadOp load, ComponentPath &components) {
+ mlir::Location loc = getLoc();
+ auto revPath = components.reversePath;
+ fir::ExtendedValue arrayExv =
+ arrayLoadExtValue(builder, loc, load, {}, load);
+ mlir::Type eleTy = lowerPath(arrayExv, components);
+ auto currentPC = components.pc;
+ auto pc = [=, prefix = components.prefixComponents,
+ suffix = components.suffixComponents](IterSpace iters) {
+ IterationSpace newIters = currentPC(iters);
+ // Add path prefix and suffix.
+ IterationSpace addIters(newIters, prefix, suffix);
+ return addIters;
+ };
+ components.pc = [=](IterSpace iters) { return iters; };
+ llvm::SmallVector<mlir::Value> substringBounds =
+ genSubstringBounds(components);
+ if (isProjectedCopyInCopyOut()) {
+ destination = load;
+ auto lambda = [=, esp = this->explicitSpace](IterSpace iters) mutable {
+ mlir::Value innerArg = esp->findArgumentOfLoad(load);
+ if (isAdjustedArrayElementType(eleTy)) {
+ mlir::Type eleRefTy = builder.getRefType(eleTy);
+ auto arrayOp = builder.create<fir::ArrayAccessOp>(
+ loc, eleRefTy, innerArg, iters.iterVec(), load.getTypeparams());
+ if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
mlir::Value dstLen = fir::factory::genLenOfCharacter(
- builder, loc, arrTy.cast<fir::SequenceType>(), memref,
- fir::getTypeParams(extMemref), iters.iterVec(),
+ builder, loc, load, iters.iterVec(), substringBounds);
+ fir::ArrayAmendOp amend = createCharArrayAmend(
+ loc, builder, arrayOp, dstLen, iters.elementExv(), innerArg,
substringBounds);
- fir::CharBoxValue dstChar(coor, dstLen);
- return fir::factory::CharacterExprHelper{builder, loc}
- .createSubstring(dstChar, substringBounds);
+ return arrayLoadExtValue(builder, loc, load, iters.iterVec(), amend,
+ dstLen);
+ } else if (fir::isa_derived(eleTy)) {
+ fir::ArrayAmendOp amend =
+ createDerivedArrayAmend(loc, load, builder, arrayOp,
+ iters.elementExv(), eleTy, innerArg);
+ return arrayLoadExtValue(builder, loc, load, iters.iterVec(),
+ amend);
}
+ assert(eleTy.isa<fir::SequenceType>());
+ TODO(loc, "array (as element) assignment");
}
- return fir::factory::arraySectionElementToExtendedValue(
- builder, loc, extMemref, coor, slice);
+ mlir::Value castedElement =
+ builder.createConvert(loc, eleTy, iters.getElement());
+ auto update = builder.create<fir::ArrayUpdateOp>(
+ loc, innerArg.getType(), innerArg, castedElement, iters.iterVec(),
+ load.getTypeparams());
+ return arrayLoadExtValue(builder, loc, load, iters.iterVec(), update);
};
- }
- auto arrLoad = builder.create<fir::ArrayLoadOp>(
- loc, arrTy, memref, shape, slice, fir::getTypeParams(extMemref));
- mlir::Value arrLd = arrLoad.getResult();
- if (isProjectedCopyInCopyOut()) {
- // Semantics are projected copy-in copy-out.
- // The backing store of the destination of an array expression may be
- // partially modified. These updates are recorded in FIR by forwarding a
- // continuation that generates an `array_update` Op. The destination is
- // always loaded at the beginning of the statement and merged at the
- // end.
- destination = arrLoad;
- auto lambda = ccStoreToDest.hasValue()
- ? ccStoreToDest.getValue()
- : defaultStoreToDestination(components.substring);
- return [=](IterSpace iters) -> ExtValue { return lambda(iters); };
+ return [=](IterSpace iters) mutable { return lambda(pc(iters)); };
}
if (isCustomCopyInCopyOut()) {
// Create an array_modify to get the LHS element address and indicate
- // the assignment, the actual assignment must be implemented in
- // ccStoreToDest.
- destination = arrLoad;
- return [=](IterSpace iters) -> ExtValue {
- mlir::Value innerArg = iters.innerArgument();
- mlir::Type resTy = innerArg.getType();
- mlir::Type eleTy = fir::applyPathToType(resTy, iters.iterVec());
+ // the assignment, and create the call to the user-defined assignment.
+ destination = load;
+ auto lambda = [=](IterSpace iters) mutable {
+ mlir::Value innerArg = explicitSpace->findArgumentOfLoad(load);
mlir::Type refEleTy =
fir::isa_ref_type(eleTy) ? eleTy : builder.getRefType(eleTy);
auto arrModify = builder.create<fir::ArrayModifyOp>(
- loc, mlir::TypeRange{refEleTy, resTy}, innerArg, iters.iterVec(),
- destination.getTypeparams());
- return abstractArrayExtValue(arrModify.getResult(1));
- };
- }
- if (isCopyInCopyOut()) {
- // Semantics are copy-in copy-out.
- // The continuation simply forwards the result of the `array_load` Op,
- // which is the value of the array as it was when loaded. All data
- // references with rank > 0 in an array expression typically have
- // copy-in copy-out semantics.
- return [=](IterSpace) -> ExtValue { return arrLd; };
- }
- mlir::Operation::operand_range arrLdTypeParams = arrLoad.getTypeparams();
- if (isValueAttribute()) {
- // Semantics are value attribute.
- // Here the continuation will `array_fetch` a value from an array and
- // then store that value in a temporary. One can thus imitate pass by
- // value even when the call is pass by reference.
- return [=](IterSpace iters) -> ExtValue {
- mlir::Value base;
- mlir::Type eleTy = fir::applyPathToType(arrTy, iters.iterVec());
- if (isAdjustedArrayElementType(eleTy)) {
- mlir::Type eleRefTy = builder.getRefType(eleTy);
- base = builder.create<fir::ArrayAccessOp>(
- loc, eleRefTy, arrLd, iters.iterVec(), arrLdTypeParams);
- } else {
- base = builder.create<fir::ArrayFetchOp>(
- loc, eleTy, arrLd, iters.iterVec(), arrLdTypeParams);
- }
- mlir::Value temp = builder.createTemporary(
- loc, base.getType(),
- llvm::ArrayRef<mlir::NamedAttribute>{
- Fortran::lower::getAdaptToByRefAttr(builder)});
- builder.create<fir::StoreOp>(loc, base, temp);
- return fir::factory::arraySectionElementToExtendedValue(
- builder, loc, extMemref, temp, slice);
+ loc, mlir::TypeRange{refEleTy, innerArg.getType()}, innerArg,
+ iters.iterVec(), load.getTypeparams());
+ return arrayLoadExtValue(builder, loc, load, iters.iterVec(),
+ arrModify.getResult(1));
};
+ return [=](IterSpace iters) mutable { return lambda(pc(iters)); };
}
- // In the default case, the array reference forwards an `array_fetch` or
- // `array_access` Op in the continuation.
- return [=](IterSpace iters) -> ExtValue {
- mlir::Type eleTy = fir::applyPathToType(arrTy, iters.iterVec());
- if (isAdjustedArrayElementType(eleTy)) {
- mlir::Type eleRefTy = builder.getRefType(eleTy);
- mlir::Value arrayOp = builder.create<fir::ArrayAccessOp>(
- loc, eleRefTy, arrLd, iters.iterVec(), arrLdTypeParams);
- if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
- llvm::SmallVector<mlir::Value> substringBounds;
- populateBounds(substringBounds, components.substring);
+ auto lambda = [=, semant = this->semant](IterSpace iters) mutable {
+ if (semant == ConstituentSemantics::RefOpaque ||
+ isAdjustedArrayElementType(eleTy)) {
+ mlir::Type resTy = builder.getRefType(eleTy);
+ // Use array element reference semantics.
+ auto access = builder.create<fir::ArrayAccessOp>(
+ loc, resTy, load, iters.iterVec(), load.getTypeparams());
+ mlir::Value newBase = access;
+ if (fir::isa_char(eleTy)) {
+ mlir::Value dstLen = fir::factory::genLenOfCharacter(
+ builder, loc, load, iters.iterVec(), substringBounds);
if (!substringBounds.empty()) {
- mlir::Value dstLen = fir::factory::genLenOfCharacter(
- builder, loc, arrLoad, iters.iterVec(), substringBounds);
- fir::CharBoxValue dstChar(arrayOp, dstLen);
- return fir::factory::CharacterExprHelper{builder, loc}
- .createSubstring(dstChar, substringBounds);
+ fir::CharBoxValue charDst{access, dstLen};
+ fir::factory::CharacterExprHelper helper{builder, loc};
+ charDst = helper.createSubstring(charDst, substringBounds);
+ newBase = charDst.getAddr();
}
+ return arrayLoadExtValue(builder, loc, load, iters.iterVec(), newBase,
+ dstLen);
}
- return fir::factory::arraySectionElementToExtendedValue(
- builder, loc, extMemref, arrayOp, slice);
+ return arrayLoadExtValue(builder, loc, load, iters.iterVec(), newBase);
}
- auto arrFetch = builder.create<fir::ArrayFetchOp>(
- loc, eleTy, arrLd, iters.iterVec(), arrLdTypeParams);
- return fir::factory::arraySectionElementToExtendedValue(
- builder, loc, extMemref, arrFetch, slice);
+ auto fetch = builder.create<fir::ArrayFetchOp>(
+ loc, eleTy, load, iters.iterVec(), load.getTypeparams());
+ return arrayLoadExtValue(builder, loc, load, iters.iterVec(), fetch);
+ };
+ return [=](IterSpace iters) mutable {
+ auto newIters = pc(iters);
+ return lambda(newIters);
};
}
-private:
- void determineShapeOfDest(const fir::ExtendedValue &lhs) {
- destShape = fir::factory::getExtents(builder, getLoc(), lhs);
+ template <typename A>
+ CC genImplicitArrayAccess(const A &x, ComponentPath &components) {
+ components.reversePath.push_back(ImplicitSubscripts{});
+ ExtValue exv = asScalarRef(x);
+ lowerPath(exv, components);
+ auto lambda = genarr(exv, components);
+ return [=](IterSpace iters) { return lambda(components.pc(iters)); };
}
-
- void determineShapeOfDest(const Fortran::lower::SomeExpr &lhs) {
- if (!destShape.empty())
- return;
- if (explicitSpaceIsActive() && determineShapeWithSlice(lhs))
- return;
- mlir::Type idxTy = builder.getIndexType();
- mlir::Location loc = getLoc();
- if (std::optional<Fortran::evaluate::ConstantSubscripts> constantShape =
- Fortran::evaluate::GetConstantExtents(converter.getFoldingContext(),
- lhs))
- for (Fortran::common::ConstantSubscript extent : *constantShape)
- destShape.push_back(builder.createIntegerConstant(loc, idxTy, extent));
+ CC genImplicitArrayAccess(const Fortran::evaluate::NamedEntity &x,
+ ComponentPath &components) {
+ if (x.IsSymbol())
+ return genImplicitArrayAccess(getFirstSym(x), components);
+ return genImplicitArrayAccess(x.GetComponent(), components);
}
- bool genShapeFromDataRef(const Fortran::semantics::Symbol &x) {
- return false;
- }
- bool genShapeFromDataRef(const Fortran::evaluate::CoarrayRef &) {
- TODO(getLoc(), "coarray ref");
- return false;
- }
- bool genShapeFromDataRef(const Fortran::evaluate::Component &x) {
- return x.base().Rank() > 0 ? genShapeFromDataRef(x.base()) : false;
- }
- bool genShapeFromDataRef(const Fortran::evaluate::ArrayRef &x) {
- if (x.Rank() == 0)
- return false;
- if (x.base().Rank() > 0)
- if (genShapeFromDataRef(x.base()))
- return true;
- // x has rank and x.base did not produce a shape.
- ExtValue exv = x.base().IsSymbol() ? asScalarRef(getFirstSym(x.base()))
- : asScalarRef(x.base().GetComponent());
+ template <typename A>
+ CC genAsScalar(const A &x) {
mlir::Location loc = getLoc();
- mlir::IndexType idxTy = builder.getIndexType();
- llvm::SmallVector<mlir::Value> definedShape =
- fir::factory::getExtents(builder, loc, exv);
- mlir::Value one = builder.createIntegerConstant(loc, idxTy, 1);
- for (auto ss : llvm::enumerate(x.subscript())) {
- std::visit(Fortran::common::visitors{
- [&](const Fortran::evaluate::Triplet &trip) {
- // For a subscript of triple notation, we compute the
- // range of this dimension of the iteration space.
- auto lo = [&]() {
- if (auto optLo = trip.lower())
- return fir::getBase(asScalar(*optLo));
- return getLBound(exv, ss.index(), one);
- }();
- auto hi = [&]() {
- if (auto optHi = trip.upper())
- return fir::getBase(asScalar(*optHi));
- return getUBound(exv, ss.index(), one);
- }();
- auto step = builder.createConvert(
- loc, idxTy, fir::getBase(asScalar(trip.stride())));
- auto extent = builder.genExtentFromTriplet(loc, lo, hi,
- step, idxTy);
- destShape.push_back(extent);
- },
- [&](auto) {}},
- ss.value().u);
+ if (isProjectedCopyInCopyOut()) {
+ return [=, &x, builder = &converter.getFirOpBuilder()](
+ IterSpace iters) -> ExtValue {
+ ExtValue exv = asScalarRef(x);
+ mlir::Value val = fir::getBase(exv);
+ mlir::Type eleTy = fir::unwrapRefType(val.getType());
+ if (isAdjustedArrayElementType(eleTy)) {
+ if (fir::isa_char(eleTy)) {
+ fir::factory::CharacterExprHelper{*builder, loc}.createAssign(
+ exv, iters.elementExv());
+ } else if (fir::isa_derived(eleTy)) {
+ TODO(loc, "assignment of derived type");
+ } else {
+ fir::emitFatalError(loc, "array type not expected in scalar");
+ }
+ } else {
+ builder->create<fir::StoreOp>(loc, iters.getElement(), val);
+ }
+ return exv;
+ };
}
- return true;
- }
- bool genShapeFromDataRef(const Fortran::evaluate::NamedEntity &x) {
- if (x.IsSymbol())
- return genShapeFromDataRef(getFirstSym(x));
- return genShapeFromDataRef(x.GetComponent());
- }
- bool genShapeFromDataRef(const Fortran::evaluate::DataRef &x) {
- return std::visit([&](const auto &v) { return genShapeFromDataRef(v); },
- x.u);
- }
-
- /// When in an explicit space, the ranked component must be evaluated to
- /// determine the actual number of iterations when slicing triples are
- /// present. Lower these expressions here.
- bool determineShapeWithSlice(const Fortran::lower::SomeExpr &lhs) {
- LLVM_DEBUG(Fortran::lower::DumpEvaluateExpr::dump(
- llvm::dbgs() << "determine shape of:\n", lhs));
- // FIXME: We may not want to use ExtractDataRef here since it doesn't deal
- // with substrings, etc.
- std::optional<Fortran::evaluate::DataRef> dref =
- Fortran::evaluate::ExtractDataRef(lhs);
- return dref.has_value() ? genShapeFromDataRef(*dref) : false;
- }
-
- ExtValue lowerArrayExpression(const Fortran::lower::SomeExpr &exp) {
- mlir::Type resTy = converter.genType(exp);
- return std::visit(
- [&](const auto &e) { return lowerArrayExpression(genarr(e), resTy); },
- exp.u);
- }
- ExtValue lowerArrayExpression(const ExtValue &exv) {
- assert(!explicitSpace);
- mlir::Type resTy = fir::unwrapPassByRefType(fir::getBase(exv).getType());
- return lowerArrayExpression(genarr(exv), resTy);
- }
-
- void populateBounds(llvm::SmallVectorImpl<mlir::Value> &bounds,
- const Fortran::evaluate::Substring *substring) {
- if (!substring)
- return;
- bounds.push_back(fir::getBase(asScalar(substring->lower())));
- if (auto upper = substring->upper())
- bounds.push_back(fir::getBase(asScalar(*upper)));
+ return [=, &x](IterSpace) { return asScalar(x); };
}
- /// Default store to destination implementation.
- /// This implements the default case, which is to assign the value in
- /// `iters.element` into the destination array, `iters.innerArgument`. Handles
- /// by value and by reference assignment.
- CC defaultStoreToDestination(const Fortran::evaluate::Substring *substring) {
- return [=](IterSpace iterSpace) -> ExtValue {
- mlir::Location loc = getLoc();
- mlir::Value innerArg = iterSpace.innerArgument();
- fir::ExtendedValue exv = iterSpace.elementExv();
- mlir::Type arrTy = innerArg.getType();
- mlir::Type eleTy = fir::applyPathToType(arrTy, iterSpace.iterVec());
- if (isAdjustedArrayElementType(eleTy)) {
- // The elemental update is in the memref domain. Under this semantics,
- // we must always copy the computed new element from its location in
- // memory into the destination array.
- mlir::Type resRefTy = builder.getRefType(eleTy);
- // Get a reference to the array element to be amended.
- auto arrayOp = builder.create<fir::ArrayAccessOp>(
- loc, resRefTy, innerArg, iterSpace.iterVec(),
- destination.getTypeparams());
- if (auto charTy = eleTy.dyn_cast<fir::CharacterType>()) {
- llvm::SmallVector<mlir::Value> substringBounds;
- populateBounds(substringBounds, substring);
- mlir::Value dstLen = fir::factory::genLenOfCharacter(
- builder, loc, destination, iterSpace.iterVec(), substringBounds);
- fir::ArrayAmendOp amend = createCharArrayAmend(
- loc, builder, arrayOp, dstLen, exv, innerArg, substringBounds);
- return abstractArrayExtValue(amend, dstLen);
- }
- if (fir::isa_derived(eleTy)) {
- fir::ArrayAmendOp amend = createDerivedArrayAmend(
- loc, destination, builder, arrayOp, exv, eleTy, innerArg);
- return abstractArrayExtValue(amend /*FIXME: typeparams?*/);
- }
- assert(eleTy.isa<fir::SequenceType>() && "must be an array");
- TODO(loc, "array (as element) assignment");
- }
- // By value semantics. The element is being assigned by value.
- mlir::Value ele = builder.createConvert(loc, eleTy, fir::getBase(exv));
- auto update = builder.create<fir::ArrayUpdateOp>(
- loc, arrTy, innerArg, ele, iterSpace.iterVec(),
- destination.getTypeparams());
- return abstractArrayExtValue(update);
+ CC genarr(const Fortran::semantics::Symbol &x, ComponentPath &components) {
+ if (explicitSpaceIsActive()) {
+ if (x.Rank() > 0)
+ components.reversePath.push_back(ImplicitSubscripts{});
+ if (fir::ArrayLoadOp load = explicitSpace->findBinding(&x))
+ return applyPathToArrayLoad(load, components);
+ } else {
+ return genImplicitArrayAccess(x, components);
+ }
+ if (pathIsEmpty(components))
+ return genAsScalar(x);
+ mlir::Location loc = getLoc();
+ return [=](IterSpace) -> ExtValue {
+ fir::emitFatalError(loc, "reached symbol with path");
};
}
- /// For an elemental array expression.
- /// 1. Lower the scalars and array loads.
- /// 2. Create the iteration space.
- /// 3. Create the element-by-element computation in the loop.
- /// 4. Return the resulting array value.
- /// If no destination was set in the array context, a temporary of
- /// \p resultTy will be created to hold the evaluated expression.
- /// Otherwise, \p resultTy is ignored and the expression is evaluated
- /// in the destination. \p f is a continuation built from an
- /// evaluate::Expr or an ExtendedValue.
- ExtValue lowerArrayExpression(CC f, mlir::Type resultTy) {
+ /// Lower a component path with or without rank.
+ /// Example: <code>array%baz%qux%waldo</code>
+ CC genarr(const Fortran::evaluate::Component &x, ComponentPath &components) {
+ if (explicitSpaceIsActive()) {
+ if (x.base().Rank() == 0 && x.Rank() > 0)
+ components.reversePath.push_back(ImplicitSubscripts{});
+ if (fir::ArrayLoadOp load = explicitSpace->findBinding(&x))
+ return applyPathToArrayLoad(load, components);
+ } else {
+ if (x.base().Rank() == 0)
+ return genImplicitArrayAccess(x, components);
+ }
+ bool atEnd = pathIsEmpty(components);
+ if (!getLastSym(x).test(Fortran::semantics::Symbol::Flag::ParentComp))
+ // Skip parent components; their components are placed directly in the
+ // object.
+ components.reversePath.push_back(&x);
+ auto result = genarr(x.base(), components);
+ if (components.applied)
+ return result;
+ if (atEnd)
+ return genAsScalar(x);
mlir::Location loc = getLoc();
- auto [iterSpace, insPt] = genIterSpace(resultTy);
- auto exv = f(iterSpace);
- iterSpace.setElement(std::move(exv));
- auto lambda = ccStoreToDest.hasValue()
- ? ccStoreToDest.getValue()
- : defaultStoreToDestination(/*substring=*/nullptr);
- mlir::Value updVal = fir::getBase(lambda(iterSpace));
- finalizeElementCtx();
- builder.create<fir::ResultOp>(loc, updVal);
- builder.restoreInsertionPoint(insPt);
- return abstractArrayExtValue(iterSpace.outerResult());
+ return [=](IterSpace) -> ExtValue {
+ fir::emitFatalError(loc, "reached component with path");
+ };
}
- /// Compute the shape of a slice.
- llvm::SmallVector<mlir::Value> computeSliceShape(mlir::Value slice) {
- llvm::SmallVector<mlir::Value> slicedShape;
- auto slOp = mlir::cast<fir::SliceOp>(slice.getDefiningOp());
- mlir::Operation::operand_range triples = slOp.getTriples();
- mlir::IndexType idxTy = builder.getIndexType();
- mlir::Location loc = getLoc();
- for (unsigned i = 0, end = triples.size(); i < end; i += 3) {
- if (!mlir::isa_and_nonnull<fir::UndefOp>(
- triples[i + 1].getDefiningOp())) {
- // (..., lb:ub:step, ...) case: extent = max((ub-lb+step)/step, 0)
- // See Fortran 2018 9.5.3.3.2 section for more details.
- mlir::Value res = builder.genExtentFromTriplet(
- loc, triples[i], triples[i + 1], triples[i + 2], idxTy);
- slicedShape.emplace_back(res);
- } else {
- // do nothing. `..., i, ...` case, so dimension is dropped.
+ /// Array reference with subscripts. If this has rank > 0, this is a form
+ /// of an array section (slice).
+ ///
+ /// There are two "slicing" primitives that may be applied on a dimension by
+ /// dimension basis: (1) triple notation and (2) vector addressing. Since
+ /// dimensions can be selectively sliced, some dimensions may contain
+ /// regular scalar expressions and those dimensions do not participate in
+ /// the array expression evaluation.
+ CC genarr(const Fortran::evaluate::ArrayRef &x, ComponentPath &components) {
+ if (explicitSpaceIsActive()) {
+ if (Fortran::lower::isRankedArrayAccess(x))
+ components.reversePath.push_back(ImplicitSubscripts{});
+ if (fir::ArrayLoadOp load = explicitSpace->findBinding(&x)) {
+ components.reversePath.push_back(&x);
+ return applyPathToArrayLoad(load, components);
+ }
+ } else {
+ if (Fortran::lower::isRankedArrayAccess(x)) {
+ components.reversePath.push_back(&x);
+ return genImplicitArrayAccess(x.base(), components);
}
}
- return slicedShape;
+ bool atEnd = pathIsEmpty(components);
+ components.reversePath.push_back(&x);
+ auto result = genarr(x.base(), components);
+ if (components.applied)
+ return result;
+ mlir::Location loc = getLoc();
+ if (atEnd) {
+ if (x.Rank() == 0)
+ return genAsScalar(x);
+ fir::emitFatalError(loc, "expected scalar");
+ }
+ return [=](IterSpace) -> ExtValue {
+ fir::emitFatalError(loc, "reached arrayref with path");
+ };
}
- /// Get the shape from an ArrayOperand. The shape of the array is adjusted if
- /// the array was sliced.
- llvm::SmallVector<mlir::Value> getShape(ArrayOperand array) {
- if (array.slice)
- return computeSliceShape(array.slice);
- if (array.memref.getType().isa<fir::BoxType>())
- return fir::factory::readExtents(builder, getLoc(),
- fir::BoxValue{array.memref});
- std::vector<mlir::Value, std::allocator<mlir::Value>> extents =
- fir::factory::getExtents(array.shape);
- return {extents.begin(), extents.end()};
+ CC genarr(const Fortran::evaluate::CoarrayRef &x, ComponentPath &components) {
+ TODO(getLoc(), "coarray reference");
}
- /// Get the shape from an ArrayLoad.
- llvm::SmallVector<mlir::Value> getShape(fir::ArrayLoadOp arrayLoad) {
- return getShape(ArrayOperand{arrayLoad.getMemref(), arrayLoad.getShape(),
- arrayLoad.getSlice()});
+ CC genarr(const Fortran::evaluate::NamedEntity &x,
+ ComponentPath &components) {
+ return x.IsSymbol() ? genarr(getFirstSym(x), components)
+ : genarr(x.GetComponent(), components);
}
- /// Returns the first array operand that may not be absent. If all
- /// array operands may be absent, return the first one.
- const ArrayOperand &getInducingShapeArrayOperand() const {
- assert(!arrayOperands.empty());
- for (const ArrayOperand &op : arrayOperands)
- if (!op.mayBeAbsent)
- return op;
- // If all arrays operand appears in optional position, then none of them
- // is allowed to be absent as per 15.5.2.12 point 3. (6). Just pick the
- // first operands.
- // TODO: There is an opportunity to add a runtime check here that
- // this array is present as required.
- return arrayOperands[0];
+ CC genarr(const Fortran::evaluate::DataRef &x, ComponentPath &components) {
+ return std::visit([&](const auto &v) { return genarr(v, components); },
+ x.u);
}
- /// Generate the shape of the iteration space over the array expression. The
- /// iteration space may be implicit, explicit, or both. If it is implied it is
- /// based on the destination and operand array loads, or an optional
- /// Fortran::evaluate::Shape from the front end. If the shape is explicit,
- /// this returns any implicit shape component, if it exists.
- llvm::SmallVector<mlir::Value> genIterationShape() {
- // Use the precomputed destination shape.
- if (!destShape.empty())
- return destShape;
- // Otherwise, use the destination's shape.
- if (destination)
- return getShape(destination);
- // Otherwise, use the first ArrayLoad operand shape.
- if (!arrayOperands.empty())
- return getShape(getInducingShapeArrayOperand());
- fir::emitFatalError(getLoc(),
- "failed to compute the array expression shape");
+ bool pathIsEmpty(const ComponentPath &components) {
+ return components.reversePath.empty();
}
explicit ArrayExprLowering(Fortran::lower::AbstractConverter &converter,
return semant == ConstituentSemantics::ProjectedCopyInCopyOut;
}
+ // TODO: Determine whether CustomCopyInCopyOut semantics are still needed.
inline bool isCustomCopyInCopyOut() {
return semant == ConstituentSemantics::CustomCopyInCopyOut;
}
const Fortran::lower::SomeExpr &expr, Fortran::lower::SymMap &symMap,
Fortran::lower::StatementContext &stmtCtx) {
LLVM_DEBUG(expr.AsFortran(llvm::dbgs() << "address: ") << '\n');
- return ScalarExprLowering{loc, converter, symMap, stmtCtx}.gen(expr);
+ return ScalarExprLowering(loc, converter, symMap, stmtCtx).gen(expr);
}
fir::ExtendedValue Fortran::lower::createInitializerAddress(
return ScalarExprLowering(loc, converter, symMap, stmtCtx, &init).gen(expr);
}
+void Fortran::lower::createSomeArrayAssignment(
+ Fortran::lower::AbstractConverter &converter,
+ const Fortran::lower::SomeExpr &lhs, const Fortran::lower::SomeExpr &rhs,
+ Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx) {
+ LLVM_DEBUG(lhs.AsFortran(llvm::dbgs() << "onto array: ") << '\n';
+ rhs.AsFortran(llvm::dbgs() << "assign expression: ") << '\n';);
+ ArrayExprLowering::lowerArrayAssignment(converter, symMap, stmtCtx, lhs, rhs);
+}
+
+void Fortran::lower::createSomeArrayAssignment(
+ Fortran::lower::AbstractConverter &converter, const fir::ExtendedValue &lhs,
+ const Fortran::lower::SomeExpr &rhs, Fortran::lower::SymMap &symMap,
+ Fortran::lower::StatementContext &stmtCtx) {
+ LLVM_DEBUG(llvm::dbgs() << "onto array: " << lhs << '\n';
+ rhs.AsFortran(llvm::dbgs() << "assign expression: ") << '\n';);
+ ArrayExprLowering::lowerArrayAssignment(converter, symMap, stmtCtx, lhs, rhs);
+}
+void Fortran::lower::createSomeArrayAssignment(
+ Fortran::lower::AbstractConverter &converter, const fir::ExtendedValue &lhs,
+ const fir::ExtendedValue &rhs, Fortran::lower::SymMap &symMap,
+ Fortran::lower::StatementContext &stmtCtx) {
+ LLVM_DEBUG(llvm::dbgs() << "onto array: " << lhs << '\n';
+ llvm::dbgs() << "assign expression: " << rhs << '\n';);
+ ArrayExprLowering::lowerArrayAssignment(converter, symMap, stmtCtx, lhs, rhs);
+}
+
+void Fortran::lower::createAnyMaskedArrayAssignment(
+ Fortran::lower::AbstractConverter &converter,
+ const Fortran::lower::SomeExpr &lhs, const Fortran::lower::SomeExpr &rhs,
+ Fortran::lower::ExplicitIterSpace &explicitSpace,
+ Fortran::lower::ImplicitIterSpace &implicitSpace,
+ Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx) {
+ LLVM_DEBUG(lhs.AsFortran(llvm::dbgs() << "onto array: ") << '\n';
+ rhs.AsFortran(llvm::dbgs() << "assign expression: ")
+ << " given the explicit iteration space:\n"
+ << explicitSpace << "\n and implied mask conditions:\n"
+ << implicitSpace << '\n';);
+ ArrayExprLowering::lowerAnyMaskedArrayAssignment(
+ converter, symMap, stmtCtx, lhs, rhs, explicitSpace, implicitSpace);
+}
+
+void Fortran::lower::createAllocatableArrayAssignment(
+ Fortran::lower::AbstractConverter &converter,
+ const Fortran::lower::SomeExpr &lhs, const Fortran::lower::SomeExpr &rhs,
+ Fortran::lower::ExplicitIterSpace &explicitSpace,
+ Fortran::lower::ImplicitIterSpace &implicitSpace,
+ Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx) {
+ LLVM_DEBUG(lhs.AsFortran(llvm::dbgs() << "defining array: ") << '\n';
+ rhs.AsFortran(llvm::dbgs() << "assign expression: ")
+ << " given the explicit iteration space:\n"
+ << explicitSpace << "\n and implied mask conditions:\n"
+ << implicitSpace << '\n';);
+ ArrayExprLowering::lowerAllocatableArrayAssignment(
+ converter, symMap, stmtCtx, lhs, rhs, explicitSpace, implicitSpace);
+}
+
+fir::ExtendedValue Fortran::lower::createSomeArrayTempValue(
+ Fortran::lower::AbstractConverter &converter,
+ const Fortran::lower::SomeExpr &expr, Fortran::lower::SymMap &symMap,
+ Fortran::lower::StatementContext &stmtCtx) {
+ LLVM_DEBUG(expr.AsFortran(llvm::dbgs() << "array value: ") << '\n');
+ return ArrayExprLowering::lowerNewArrayExpression(converter, symMap, stmtCtx,
+ expr);
+}
+
+void Fortran::lower::createLazyArrayTempValue(
+ Fortran::lower::AbstractConverter &converter,
+ const Fortran::lower::SomeExpr &expr, mlir::Value raggedHeader,
+ Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx) {
+ LLVM_DEBUG(expr.AsFortran(llvm::dbgs() << "array value: ") << '\n');
+ ArrayExprLowering::lowerLazyArrayExpression(converter, symMap, stmtCtx, expr,
+ raggedHeader);
+}
+
fir::ExtendedValue
Fortran::lower::createSomeArrayBox(Fortran::lower::AbstractConverter &converter,
const Fortran::lower::SomeExpr &expr,
Fortran::lower::SymMap &symMap,
Fortran::lower::StatementContext &stmtCtx) {
if (x->base().IsSymbol())
- return genArrayLoad(loc, converter, builder, &x->base().GetLastSymbol(),
- symMap, stmtCtx);
+ return genArrayLoad(loc, converter, builder, &getLastSym(x->base()), symMap,
+ stmtCtx);
return genArrayLoad(loc, converter, builder, &x->base().GetComponent(),
symMap, stmtCtx);
}
esp.incrementCounter();
}
-void Fortran::lower::createSomeArrayAssignment(
- Fortran::lower::AbstractConverter &converter,
- const Fortran::lower::SomeExpr &lhs, const Fortran::lower::SomeExpr &rhs,
- Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx) {
- LLVM_DEBUG(lhs.AsFortran(llvm::dbgs() << "onto array: ") << '\n';
- rhs.AsFortran(llvm::dbgs() << "assign expression: ") << '\n';);
- ArrayExprLowering::lowerArrayAssignment(converter, symMap, stmtCtx, lhs, rhs);
-}
-
-void Fortran::lower::createSomeArrayAssignment(
- Fortran::lower::AbstractConverter &converter, const fir::ExtendedValue &lhs,
- const Fortran::lower::SomeExpr &rhs, Fortran::lower::SymMap &symMap,
- Fortran::lower::StatementContext &stmtCtx) {
- LLVM_DEBUG(llvm::dbgs() << "onto array: " << lhs << '\n';
- rhs.AsFortran(llvm::dbgs() << "assign expression: ") << '\n';);
- ArrayExprLowering::lowerArrayAssignment(converter, symMap, stmtCtx, lhs, rhs);
-}
-
-void Fortran::lower::createSomeArrayAssignment(
- Fortran::lower::AbstractConverter &converter, const fir::ExtendedValue &lhs,
- const fir::ExtendedValue &rhs, Fortran::lower::SymMap &symMap,
- Fortran::lower::StatementContext &stmtCtx) {
- LLVM_DEBUG(llvm::dbgs() << "onto array: " << lhs << '\n';
- llvm::dbgs() << "assign expression: " << rhs << '\n';);
- ArrayExprLowering::lowerArrayAssignment(converter, symMap, stmtCtx, lhs, rhs);
-}
-
-void Fortran::lower::createAnyMaskedArrayAssignment(
- Fortran::lower::AbstractConverter &converter,
- const Fortran::lower::SomeExpr &lhs, const Fortran::lower::SomeExpr &rhs,
- Fortran::lower::ExplicitIterSpace &explicitSpace,
- Fortran::lower::ImplicitIterSpace &implicitSpace,
- Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx) {
- LLVM_DEBUG(lhs.AsFortran(llvm::dbgs() << "onto array: ") << '\n';
- rhs.AsFortran(llvm::dbgs() << "assign expression: ")
- << " given the explicit iteration space:\n"
- << explicitSpace << "\n and implied mask conditions:\n"
- << implicitSpace << '\n';);
- ArrayExprLowering::lowerAnyMaskedArrayAssignment(
- converter, symMap, stmtCtx, lhs, rhs, explicitSpace, implicitSpace);
-}
-
-void Fortran::lower::createAllocatableArrayAssignment(
- Fortran::lower::AbstractConverter &converter,
- const Fortran::lower::SomeExpr &lhs, const Fortran::lower::SomeExpr &rhs,
- Fortran::lower::ExplicitIterSpace &explicitSpace,
- Fortran::lower::ImplicitIterSpace &implicitSpace,
- Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx) {
- LLVM_DEBUG(lhs.AsFortran(llvm::dbgs() << "defining array: ") << '\n';
- rhs.AsFortran(llvm::dbgs() << "assign expression: ")
- << " given the explicit iteration space:\n"
- << explicitSpace << "\n and implied mask conditions:\n"
- << implicitSpace << '\n';);
- ArrayExprLowering::lowerAllocatableArrayAssignment(
- converter, symMap, stmtCtx, lhs, rhs, explicitSpace, implicitSpace);
-}
-
-fir::ExtendedValue Fortran::lower::createSomeArrayTempValue(
- Fortran::lower::AbstractConverter &converter,
- const Fortran::lower::SomeExpr &expr, Fortran::lower::SymMap &symMap,
- Fortran::lower::StatementContext &stmtCtx) {
- LLVM_DEBUG(expr.AsFortran(llvm::dbgs() << "array value: ") << '\n');
- return ArrayExprLowering::lowerNewArrayExpression(converter, symMap, stmtCtx,
- expr);
-}
-
-void Fortran::lower::createLazyArrayTempValue(
- Fortran::lower::AbstractConverter &converter,
- const Fortran::lower::SomeExpr &expr, mlir::Value raggedHeader,
- Fortran::lower::SymMap &symMap, Fortran::lower::StatementContext &stmtCtx) {
- LLVM_DEBUG(expr.AsFortran(llvm::dbgs() << "array value: ") << '\n');
- ArrayExprLowering::lowerLazyArrayExpression(converter, symMap, stmtCtx, expr,
- raggedHeader);
-}
-
mlir::Value Fortran::lower::genMaxWithZero(fir::FirOpBuilder &builder,
mlir::Location loc,
mlir::Value value) {
if (llvm::Optional<int64_t> len = box.getCharLenConst())
return builder.createIntegerConstant(loc, lenTy, *len);
if (llvm::Optional<Fortran::lower::SomeExpr> lenExpr = box.getCharLenExpr())
- return genScalarValue(converter, loc, *lenExpr, symMap, stmtCtx);
+ // If the length expression is negative, the length is zero. See F2018
+ // 7.4.4.2 point 5.
+ return Fortran::lower::genMaxWithZero(
+ builder, loc,
+ genScalarValue(converter, loc, *lenExpr, symMap, stmtCtx));
return mlir::Value{};
}
args);
}
+mlir::Value Fortran::lower::genMin(fir::FirOpBuilder &builder,
+ mlir::Location loc,
+ llvm::ArrayRef<mlir::Value> args) {
+ assert(args.size() > 0 && "min requires at least one argument");
+ return IntrinsicLibrary{builder, loc}
+ .genExtremum<Extremum::Min, ExtremumBehavior::MinMaxss>(args[0].getType(),
+ args);
+}
+
mlir::Value Fortran::lower::genPow(fir::FirOpBuilder &builder,
mlir::Location loc, mlir::Type type,
mlir::Value x, mlir::Value y) {
#include "flang/Optimizer/Builder/Character.h"
#include "flang/Lower/Todo.h"
#include "flang/Optimizer/Builder/DoLoopHelper.h"
+#include "flang/Optimizer/Builder/FIRBuilder.h"
#include "llvm/Support/Debug.h"
#include <optional>
#define DEBUG_TYPE "flang-lower-character"
-using namespace mlir;
-
//===----------------------------------------------------------------------===//
// CharacterExprHelper implementation
//===----------------------------------------------------------------------===//
-/// Unwrap base fir.char<kind,len> type.
-static fir::CharacterType recoverCharacterType(mlir::Type type) {
+/// Unwrap all the ref and box types and return the inner element type.
+static mlir::Type unwrapBoxAndRef(mlir::Type type) {
if (auto boxType = type.dyn_cast<fir::BoxCharType>())
return boxType.getEleTy();
while (true) {
else
break;
}
- return fir::unwrapSequenceType(type).cast<fir::CharacterType>();
+ return type;
+}
+
+/// Unwrap base fir.char<kind,len> type.
+static fir::CharacterType recoverCharacterType(mlir::Type type) {
+ type = fir::unwrapSequenceType(unwrapBoxAndRef(type));
+ if (auto charTy = type.dyn_cast<fir::CharacterType>())
+ return charTy;
+ llvm::report_fatal_error("expected a character type");
+}
+
+bool fir::factory::CharacterExprHelper::isCharacterScalar(mlir::Type type) {
+ type = unwrapBoxAndRef(type);
+ return !type.isa<fir::SequenceType>() && fir::isa_char(type);
+}
+
+bool fir::factory::CharacterExprHelper::isArray(mlir::Type type) {
+ type = unwrapBoxAndRef(type);
+ if (auto seqTy = type.dyn_cast<fir::SequenceType>())
+ return fir::isa_char(seqTy.getEleTy());
+ return false;
}
-/// Get fir.char<kind> type with the same kind as inside str.
fir::CharacterType
fir::factory::CharacterExprHelper::getCharacterType(mlir::Type type) {
assert(isCharacterScalar(type) && "expected scalar character");
// If the embox is accessible, use its operand to avoid filling
// the generated fir with embox/unbox.
mlir::Value boxCharLen;
- if (auto *definingOp = character.getDefiningOp()) {
- if (auto box = dyn_cast<fir::EmboxCharOp>(definingOp)) {
+ if (auto definingOp = character.getDefiningOp()) {
+ if (auto box = mlir::dyn_cast<fir::EmboxCharOp>(definingOp)) {
base = box.getMemref();
boxCharLen = box.getLen();
}
auto lenType = builder.getCharacterLengthType();
auto len = builder.createConvert(loc, lenType, box.getLen());
for (auto extent : box.getExtents())
- len = builder.create<arith::MulIOp>(
+ len = builder.create<mlir::arith::MulIOp>(
loc, len, builder.createConvert(loc, lenType, extent));
// TODO: typeLen can be improved in compiled constant cases
return buff;
}
-/// Get the LLVM intrinsic for `memcpy`. Use the 64 bit version.
-mlir::FuncOp fir::factory::getLlvmMemcpy(fir::FirOpBuilder &builder) {
- auto ptrTy = builder.getRefType(builder.getIntegerType(8));
- llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
- builder.getI1Type()};
- auto memcpyTy =
- mlir::FunctionType::get(builder.getContext(), args, llvm::None);
- return builder.addNamedFunction(builder.getUnknownLoc(),
- "llvm.memcpy.p0i8.p0i8.i64", memcpyTy);
-}
-
-/// Get the LLVM intrinsic for `memmove`. Use the 64 bit version.
-mlir::FuncOp fir::factory::getLlvmMemmove(fir::FirOpBuilder &builder) {
- auto ptrTy = builder.getRefType(builder.getIntegerType(8));
- llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
- builder.getI1Type()};
- auto memmoveTy =
- mlir::FunctionType::get(builder.getContext(), args, llvm::None);
- return builder.addNamedFunction(builder.getUnknownLoc(),
- "llvm.memmove.p0i8.p0i8.i64", memmoveTy);
-}
-
-/// Get the LLVM intrinsic for `memset`. Use the 64 bit version.
-mlir::FuncOp fir::factory::getLlvmMemset(fir::FirOpBuilder &builder) {
- auto ptrTy = builder.getRefType(builder.getIntegerType(8));
- llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
- builder.getI1Type()};
- auto memsetTy =
- mlir::FunctionType::get(builder.getContext(), args, llvm::None);
- return builder.addNamedFunction(builder.getUnknownLoc(),
- "llvm.memset.p0i8.p0i8.i64", memsetTy);
-}
-
-/// Get the standard `realloc` function.
-mlir::FuncOp fir::factory::getRealloc(fir::FirOpBuilder &builder) {
- auto ptrTy = builder.getRefType(builder.getIntegerType(8));
- llvm::SmallVector<mlir::Type> args = {ptrTy, builder.getI64Type()};
- auto reallocTy = mlir::FunctionType::get(builder.getContext(), args, {ptrTy});
- return builder.addNamedFunction(builder.getUnknownLoc(), "realloc",
- reallocTy);
-}
-
/// Create a loop to copy `count` characters from `src` to `dest`. Note that the
/// KIND indicates the number of bits in a code point. (ASCII, UCS-2, or UCS-4.)
void fir::factory::CharacterExprHelper::createCopy(
auto i64Ty = builder.getI64Type();
auto kindBytes = builder.createIntegerConstant(loc, i64Ty, bytes);
auto castCount = builder.createConvert(loc, i64Ty, count);
- auto totalBytes = builder.create<arith::MulIOp>(loc, kindBytes, castCount);
+ auto totalBytes =
+ builder.create<mlir::arith::MulIOp>(loc, kindBytes, castCount);
auto notVolatile = builder.createBool(loc, false);
auto memmv = getLlvmMemmove(builder);
auto argTys = memmv.getFunctionType().getInputs();
/// Returns the minimum of integer mlir::Value \p a and \p b.
mlir::Value genMin(fir::FirOpBuilder &builder, mlir::Location loc,
                   mlir::Value a, mlir::Value b) {
  auto aLessThanB = builder.create<mlir::arith::CmpIOp>(
      loc, mlir::arith::CmpIPredicate::slt, a, b);
  return builder.create<mlir::arith::SelectOp>(loc, aLessThanB, a, b);
}
// Pad if needed.
if (!compileTimeSameLength) {
auto one = builder.createIntegerConstant(loc, lhs.getLen().getType(), 1);
- auto maxPadding = builder.create<arith::SubIOp>(loc, lhs.getLen(), one);
+ auto maxPadding =
+ builder.create<mlir::arith::SubIOp>(loc, lhs.getLen(), one);
createPadding(lhs, copyCount, maxPadding);
}
}
lhs.getLen());
auto rhsLen = builder.createConvert(loc, builder.getCharacterLengthType(),
rhs.getLen());
- mlir::Value len = builder.create<arith::AddIOp>(loc, lhsLen, rhsLen);
+ mlir::Value len = builder.create<mlir::arith::AddIOp>(loc, lhsLen, rhsLen);
auto temp = createCharacterTemp(getCharacterType(rhs), len);
createCopy(temp, lhs, lhsLen);
auto one = builder.createIntegerConstant(loc, len.getType(), 1);
- auto upperBound = builder.create<arith::SubIOp>(loc, len, one);
+ auto upperBound = builder.create<mlir::arith::SubIOp>(loc, len, one);
auto lhsLenIdx = builder.createConvert(loc, builder.getIndexType(), lhsLen);
auto fromBuff = getCharBoxBuffer(rhs);
auto toBuff = getCharBoxBuffer(temp);
fir::factory::DoLoopHelper{builder, loc}.createLoop(
lhsLenIdx, upperBound, one,
[&](fir::FirOpBuilder &bldr, mlir::Value index) {
- auto rhsIndex = bldr.create<arith::SubIOp>(loc, index, lhsLenIdx);
+ auto rhsIndex = bldr.create<mlir::arith::SubIOp>(loc, index, lhsLenIdx);
auto charVal = createLoadCharAt(fromBuff, rhsIndex);
createStoreCharAt(toBuff, index, charVal);
});
auto lowerBound = castBounds[0];
// FIR CoordinateOp is zero based but Fortran substring are one based.
auto one = builder.createIntegerConstant(loc, lowerBound.getType(), 1);
- auto offset = builder.create<arith::SubIOp>(loc, lowerBound, one).getResult();
+ auto offset =
+ builder.create<mlir::arith::SubIOp>(loc, lowerBound, one).getResult();
auto addr = createElementAddr(box.getBuffer(), offset);
auto kind = getCharacterKind(box.getBuffer().getType());
auto charTy = fir::CharacterType::getUnknownLen(builder.getContext(), kind);
mlir::Value substringLen;
if (nbounds < 2) {
substringLen =
- builder.create<arith::SubIOp>(loc, box.getLen(), castBounds[0]);
+ builder.create<mlir::arith::SubIOp>(loc, box.getLen(), castBounds[0]);
} else {
substringLen =
- builder.create<arith::SubIOp>(loc, castBounds[1], castBounds[0]);
+ builder.create<mlir::arith::SubIOp>(loc, castBounds[1], castBounds[0]);
}
- substringLen = builder.create<arith::AddIOp>(loc, substringLen, one);
+ substringLen = builder.create<mlir::arith::AddIOp>(loc, substringLen, one);
// Set length to zero if bounds were reversed (Fortran 2018 9.4.1)
auto zero = builder.createIntegerConstant(loc, substringLen.getType(), 0);
- auto cdt = builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::slt,
- substringLen, zero);
+ auto cdt = builder.create<mlir::arith::CmpIOp>(
+ loc, mlir::arith::CmpIPredicate::slt, substringLen, zero);
substringLen =
builder.create<mlir::arith::SelectOp>(loc, cdt, zero, substringLen);
auto zero = builder.createIntegerConstant(loc, indexType, 0);
auto trueVal = builder.createIntegerConstant(loc, builder.getI1Type(), 1);
auto blank = createBlankConstantCode(getCharacterType(str));
- mlir::Value lastChar = builder.create<arith::SubIOp>(loc, len, one);
+ mlir::Value lastChar = builder.create<mlir::arith::SubIOp>(loc, len, one);
auto iterWhile =
builder.create<fir::IterWhileOp>(loc, lastChar, zero, minusOne, trueVal,
auto codeAddr =
builder.createConvert(loc, builder.getRefType(blank.getType()), elemAddr);
auto c = builder.create<fir::LoadOp>(loc, codeAddr);
- auto isBlank =
- builder.create<arith::CmpIOp>(loc, arith::CmpIPredicate::eq, blank, c);
+ auto isBlank = builder.create<mlir::arith::CmpIOp>(
+ loc, mlir::arith::CmpIPredicate::eq, blank, c);
llvm::SmallVector<mlir::Value> results = {isBlank, index};
builder.create<fir::ResultOp>(loc, results);
builder.restoreInsertionPoint(insPt);
// Compute length after iteration (zero if all blanks)
mlir::Value newLen =
- builder.create<arith::AddIOp>(loc, iterWhile.getResult(1), one);
+ builder.create<mlir::arith::AddIOp>(loc, iterWhile.getResult(1), one);
auto result = builder.create<mlir::arith::SelectOp>(
loc, iterWhile.getResult(0), zero, newLen);
return builder.createConvert(loc, builder.getCharacterLengthType(), result);
return false;
}
-bool fir::factory::CharacterExprHelper::isCharacterScalar(mlir::Type type) {
- if (type.isa<fir::BoxCharType>())
- return true;
- type = fir::unwrapRefType(type);
- if (auto boxTy = type.dyn_cast<fir::BoxType>())
- type = boxTy.getEleTy();
- type = fir::unwrapRefType(type);
- return !type.isa<fir::SequenceType>() && fir::isa_char(type);
-}
-
/// Return the KIND of the fir.char type underlying the scalar character
/// entity \p type (asserts if \p type is not a scalar character).
fir::KindTy
fir::factory::CharacterExprHelper::getCharacterKind(mlir::Type type) {
  assert(isCharacterScalar(type) && "expected scalar character");
  return recoverCharacterType(type).getFKind();
}
-bool fir::factory::CharacterExprHelper::isArray(mlir::Type type) {
- return !isCharacterScalar(type);
-}
-
bool fir::factory::CharacterExprHelper::hasConstantLengthInType(
const fir::ExtendedValue &exv) {
auto charTy = recoverCharacterType(fir::getBase(exv).getType());
auto width = bits / 8;
if (width > 1) {
auto widthVal = builder.createIntegerConstant(loc, lenTy, width);
- return builder.create<arith::DivSIOp>(loc, size, widthVal);
+ return builder.create<mlir::arith::DivSIOp>(loc, size, widthVal);
}
return size;
}
loc, tupleType.getType(0), tuple,
builder.getArrayAttr(
{builder.getIntegerAttr(builder.getIndexType(), 0)}));
+ mlir::Value proc = [&]() -> mlir::Value {
+ if (auto addrTy = addr.getType().dyn_cast<fir::BoxProcType>())
+ return builder.create<fir::BoxAddrOp>(loc, addrTy.getEleTy(), addr);
+ return addr;
+ }();
mlir::Value len = builder.create<fir::ExtractValueOp>(
loc, tupleType.getType(1), tuple,
builder.getArrayAttr(
{builder.getIntegerAttr(builder.getIndexType(), 1)}));
- return {addr, len};
+ return {proc, len};
}
mlir::Value fir::factory::createCharacterProcedureTuple(
return tuple;
}
-bool fir::factory::isCharacterProcedureTuple(mlir::Type ty) {
- mlir::TupleType tuple = ty.dyn_cast<mlir::TupleType>();
- return tuple && tuple.size() == 2 &&
- tuple.getType(0).isa<mlir::FunctionType>() &&
- fir::isa_integer(tuple.getType(1));
-}
-
mlir::Type
fir::factory::getCharacterProcedureTupleType(mlir::Type funcPointerType) {
mlir::MLIRContext *context = funcPointerType.getContext();
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
// String literals whose length exceeds this limit get a hash-based symbol
// name instead of one derived from their contents, keeping symbol names
// bounded. Tunable via -length-to-hash-string-literal (default 32).
// NOTE(review): presumably the hash is MD5 given the include above — confirm
// at the use site (not visible in this chunk).
static llvm::cl::opt<std::size_t>
    nameLengthHashSize("length-to-hash-string-literal",
                       llvm::cl::desc("string literals that exceed this length"
                                      " will use a hash value as their symbol "
                                      "name"),
                       llvm::cl::init(32));
mlir::FuncOp fir::FirOpBuilder::createFunction(mlir::Location loc,
mlir::ModuleOp module,
return create<fir::LoadOp>(
loc, fir::factory::getMutableIRBox(*this, loc, x));
},
- // UnboxedValue, ProcBoxValue or BoxValue.
[&](const auto &) -> mlir::Value {
return create<fir::EmboxOp>(loc, boxTy, itemAddr);
});
}
/// Debugging helper: dump the function currently being built.
void fir::FirOpBuilder::dumpFunc() { getFunction().dump(); }
+
static mlir::Value
genNullPointerComparison(fir::FirOpBuilder &builder, mlir::Location loc,
mlir::Value addr,
.getResult(1);
},
[&](const fir::MutableBoxValue &x) -> mlir::Value {
- // MutableBoxValue must be read into another category to work with them
- // outside of allocation/assignment contexts.
- fir::emitFatalError(loc, "readExtents on MutableBoxValue");
+ return readExtent(builder, loc,
+ fir::factory::genMutableBoxRead(builder, loc, x),
+ dim);
},
[&](const auto &) -> mlir::Value {
fir::emitFatalError(loc, "extent inquiry on scalar");
return fir::factory::componentToExtendedValue(builder, loc, element);
}
-mlir::TupleType
-fir::factory::getRaggedArrayHeaderType(fir::FirOpBuilder &builder) {
- mlir::IntegerType i64Ty = builder.getIntegerType(64);
- auto arrTy = fir::SequenceType::get(builder.getIntegerType(8), 1);
- auto buffTy = fir::HeapType::get(arrTy);
- auto extTy = fir::SequenceType::get(i64Ty, 1);
- auto shTy = fir::HeapType::get(extTy);
- return mlir::TupleType::get(builder.getContext(), {i64Ty, buffTy, shTy});
-}
-
-mlir::Value fir::factory::createZeroValue(fir::FirOpBuilder &builder,
- mlir::Location loc, mlir::Type type) {
- mlir::Type i1 = builder.getIntegerType(1);
- if (type.isa<fir::LogicalType>() || type == i1)
- return builder.createConvert(loc, type, builder.createBool(loc, false));
- if (fir::isa_integer(type))
- return builder.createIntegerConstant(loc, type, 0);
- if (fir::isa_real(type))
- return builder.createRealZeroConstant(loc, type);
- if (fir::isa_complex(type)) {
- fir::factory::Complex complexHelper(builder, loc);
- mlir::Type partType = complexHelper.getComplexPartType(type);
- mlir::Value zeroPart = builder.createRealZeroConstant(loc, partType);
- return complexHelper.createComplex(type, zeroPart, zeroPart);
- }
- fir::emitFatalError(loc, "internal: trying to generate zero value of non "
- "numeric or logical type");
-}
-
void fir::factory::genScalarAssignment(fir::FirOpBuilder &builder,
mlir::Location loc,
const fir::ExtendedValue &lhs,
genComponentByComponentAssignment(builder, loc, lhs, rhs);
}
+mlir::TupleType
+fir::factory::getRaggedArrayHeaderType(fir::FirOpBuilder &builder) {
+ mlir::IntegerType i64Ty = builder.getIntegerType(64);
+ auto arrTy = fir::SequenceType::get(builder.getIntegerType(8), 1);
+ auto buffTy = fir::HeapType::get(arrTy);
+ auto extTy = fir::SequenceType::get(i64Ty, 1);
+ auto shTy = fir::HeapType::get(extTy);
+ return mlir::TupleType::get(builder.getContext(), {i64Ty, buffTy, shTy});
+}
+
mlir::Value fir::factory::genLenOfCharacter(
fir::FirOpBuilder &builder, mlir::Location loc, fir::ArrayLoadOp arrLoad,
llvm::ArrayRef<mlir::Value> path, llvm::ArrayRef<mlir::Value> substring) {
}
TODO(loc, "LEN of character must be computed at runtime");
}
+
+mlir::Value fir::factory::createZeroValue(fir::FirOpBuilder &builder,
+ mlir::Location loc, mlir::Type type) {
+ mlir::Type i1 = builder.getIntegerType(1);
+ if (type.isa<fir::LogicalType>() || type == i1)
+ return builder.createConvert(loc, type, builder.createBool(loc, false));
+ if (fir::isa_integer(type))
+ return builder.createIntegerConstant(loc, type, 0);
+ if (fir::isa_real(type))
+ return builder.createRealZeroConstant(loc, type);
+ if (fir::isa_complex(type)) {
+ fir::factory::Complex complexHelper(builder, loc);
+ mlir::Type partType = complexHelper.getComplexPartType(type);
+ mlir::Value zeroPart = builder.createRealZeroConstant(loc, partType);
+ return complexHelper.createComplex(type, zeroPart, zeroPart);
+ }
+ fir::emitFatalError(loc, "internal: trying to generate zero value of non "
+ "numeric or logical type");
+}
#include "flang/Optimizer/Builder/LowLevelIntrinsics.h"
#include "flang/Optimizer/Builder/FIRBuilder.h"
/// Get the LLVM intrinsic for `memcpy`. Use the 64 bit version.
mlir::FuncOp fir::factory::getLlvmMemcpy(fir::FirOpBuilder &builder) {
  auto ptrTy = builder.getRefType(builder.getIntegerType(8));
  // (i8* dest, i8* src, i64 len, i1 isVolatile) -> void
  llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
                                        builder.getI1Type()};
  auto memcpyTy =
      mlir::FunctionType::get(builder.getContext(), args, llvm::None);
  return builder.addNamedFunction(builder.getUnknownLoc(),
                                  "llvm.memcpy.p0i8.p0i8.i64", memcpyTy);
}
+
+mlir::FuncOp fir::factory::getLlvmMemmove(fir::FirOpBuilder &builder) {
+ auto ptrTy = builder.getRefType(builder.getIntegerType(8));
+ llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
+ builder.getI1Type()};
+ auto memmoveTy =
+ mlir::FunctionType::get(builder.getContext(), args, llvm::None);
+ return builder.addNamedFunction(builder.getUnknownLoc(),
+ "llvm.memmove.p0i8.p0i8.i64", memmoveTy);
+}
+
+mlir::FuncOp fir::factory::getLlvmMemset(fir::FirOpBuilder &builder) {
+ auto ptrTy = builder.getRefType(builder.getIntegerType(8));
+ llvm::SmallVector<mlir::Type> args = {ptrTy, ptrTy, builder.getI64Type(),
+ builder.getI1Type()};
+ auto memsetTy =
+ mlir::FunctionType::get(builder.getContext(), args, llvm::None);
+ return builder.addNamedFunction(builder.getUnknownLoc(),
+ "llvm.memset.p0i8.p0i8.i64", memsetTy);
+}
+
/// Get the standard C runtime `realloc` function.
mlir::FuncOp fir::factory::getRealloc(fir::FirOpBuilder &builder) {
  auto ptrTy = builder.getRefType(builder.getIntegerType(8));
  // (i8* ptr, i64 newSize) -> i8*
  llvm::SmallVector<mlir::Type> args = {ptrTy, builder.getI64Type()};
  auto reallocTy = mlir::FunctionType::get(builder.getContext(), args, {ptrTy});
  return builder.addNamedFunction(builder.getUnknownLoc(), "realloc",
                                  reallocTy);
}
+
mlir::FuncOp fir::factory::getLlvmStackSave(fir::FirOpBuilder &builder) {
auto ptrTy = builder.getRefType(builder.getIntegerType(8));
auto funcTy =
return builder.addNamedFunction(builder.getUnknownLoc(), "llvm.stackrestore",
funcTy);
}
+
/// Get the `llvm.init.trampoline` intrinsic:
/// (i8* tramp, i8* func, i8* nval) -> void. Used to build runtime thunks
/// that capture a host-procedure frame pointer.
mlir::FuncOp fir::factory::getLlvmInitTrampoline(fir::FirOpBuilder &builder) {
  auto ptrTy = builder.getRefType(builder.getIntegerType(8));
  auto funcTy = mlir::FunctionType::get(builder.getContext(),
                                        {ptrTy, ptrTy, ptrTy}, llvm::None);
  return builder.addNamedFunction(builder.getUnknownLoc(),
                                  "llvm.init.trampoline", funcTy);
}
+
/// Get the `llvm.adjust.trampoline` intrinsic: (i8* tramp) -> i8*. Returns
/// the callable address of a trampoline initialized by llvm.init.trampoline.
mlir::FuncOp fir::factory::getLlvmAdjustTrampoline(fir::FirOpBuilder &builder) {
  auto ptrTy = builder.getRefType(builder.getIntegerType(8));
  auto funcTy = mlir::FunctionType::get(builder.getContext(), {ptrTy}, {ptrTy});
  return builder.addNamedFunction(builder.getUnknownLoc(),
                                  "llvm.adjust.trampoline", funcTy);
}
/// Update the IR box (fir.ref<fir.box<T>>) of the MutableBoxValue.
void updateIRBox(mlir::Value addr, mlir::ValueRange lbounds,
mlir::ValueRange extents, mlir::ValueRange lengths) {
- mlir::Value shape;
- if (!extents.empty()) {
- if (lbounds.empty()) {
- auto shapeType =
- fir::ShapeType::get(builder.getContext(), extents.size());
- shape = builder.create<fir::ShapeOp>(loc, shapeType, extents);
- } else {
- llvm::SmallVector<mlir::Value> shapeShiftBounds;
- for (auto [lb, extent] : llvm::zip(lbounds, extents)) {
- shapeShiftBounds.emplace_back(lb);
- shapeShiftBounds.emplace_back(extent);
- }
- auto shapeShiftType =
- fir::ShapeShiftType::get(builder.getContext(), extents.size());
- shape = builder.create<fir::ShapeShiftOp>(loc, shapeShiftType,
- shapeShiftBounds);
- }
- }
- mlir::Value emptySlice;
- // Ignore lengths if already constant in the box type (this would trigger an
- // error in the embox).
- llvm::SmallVector<mlir::Value> cleanedLengths;
- mlir::Value irBox;
- if (addr.getType().isa<fir::BoxType>()) {
- // The entity is already boxed.
- irBox = builder.createConvert(loc, box.getBoxTy(), addr);
- } else {
- auto cleanedAddr = addr;
- if (auto charTy = box.getEleTy().dyn_cast<fir::CharacterType>()) {
- // Cast address to box type so that both input and output type have
- // unknown or constant lengths.
- auto bt = box.getBaseTy();
- auto addrTy = addr.getType();
- auto type = addrTy.isa<fir::HeapType>() ? fir::HeapType::get(bt)
- : addrTy.isa<fir::PointerType>() ? fir::PointerType::get(bt)
- : builder.getRefType(bt);
- cleanedAddr = builder.createConvert(loc, type, addr);
- if (charTy.getLen() == fir::CharacterType::unknownLen())
- cleanedLengths.append(lengths.begin(), lengths.end());
- } else if (box.isDerivedWithLengthParameters()) {
- TODO(loc, "updating mutablebox of derived type with length parameters");
- cleanedLengths = lengths;
- }
- irBox = builder.create<fir::EmboxOp>(loc, box.getBoxTy(), cleanedAddr,
- shape, emptySlice, cleanedLengths);
- }
+ mlir::Value irBox =
+ createNewFirBox(builder, loc, box, addr, lbounds, extents, lengths);
builder.create<fir::StoreOp>(loc, irBox, box.getAddr());
}
mlir::ValueRange extents,
mlir::ValueRange lenParams,
llvm::StringRef allocName) {
- auto idxTy = builder.getIndexType();
- llvm::SmallVector<mlir::Value> lengths;
- if (auto charTy = box.getEleTy().dyn_cast<fir::CharacterType>()) {
- if (charTy.getLen() == fir::CharacterType::unknownLen()) {
- if (box.hasNonDeferredLenParams())
- lengths.emplace_back(
- builder.createConvert(loc, idxTy, box.nonDeferredLenParams()[0]));
- else if (!lenParams.empty())
- lengths.emplace_back(builder.createConvert(loc, idxTy, lenParams[0]));
- else
- fir::emitFatalError(
- loc, "could not deduce character lengths in character allocation");
- }
- }
- mlir::Value heap = builder.create<fir::AllocMemOp>(
- loc, box.getBaseTy(), allocName, lengths, extents);
- // TODO: run initializer if any. Currently, there is no way to know this is
- // required here.
+ auto lengths = getNewLengths(builder, loc, box, lenParams);
+ auto heap = builder.create<fir::AllocMemOp>(loc, box.getBaseTy(), allocName,
+ lengths, extents);
MutablePropertyWriter{builder, loc, box}.updateMutableBox(heap, lbounds,
extents, lengths);
+ if (box.getEleTy().isa<fir::RecordType>()) {
+ // TODO: skip runtime initialization if this is not required. Currently,
+ // there is no way to know here if a derived type needs it or not. But the
+ // information is available at compile time and could be reflected here
+ // somehow.
+ mlir::Value irBox = fir::factory::getMutableIRBox(builder, loc, box);
+ fir::runtime::genDerivedTypeInitialize(builder, loc, irBox);
+ }
}
void fir::factory::genInlinedDeallocate(fir::FirOpBuilder &builder,
--- /dev/null
+//===-- BoxedProcedure.cpp ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "PassDetail.h"
+#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/LowLevelIntrinsics.h"
+#include "flang/Optimizer/CodeGen/CodeGen.h"
+#include "flang/Optimizer/Dialect/FIRDialect.h"
+#include "flang/Optimizer/Dialect/FIROps.h"
+#include "flang/Optimizer/Dialect/FIRType.h"
+#include "flang/Optimizer/Support/FIRContext.h"
+#include "flang/Optimizer/Support/FatalError.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Pass/Pass.h"
+#include "mlir/Transforms/DialectConversion.h"
+
+#define DEBUG_TYPE "flang-procedure-pointer"
+
+using namespace fir;
+
+namespace {
/// Options to the procedure pointer pass.
struct BoxedProcedureOptions {
  // Lower the boxproc abstraction to function pointers and thunks where
  // required. This is the only lowering currently implemented; when false,
  // the pass is a no-op (see BoxedProcedurePass below).
  bool useThunks = true;
};
+
+/// This type converter rewrites all `!fir.boxproc<Func>` types to `Func` types.
+class BoxprocTypeRewriter : public mlir::TypeConverter {
+public:
+ using mlir::TypeConverter::convertType;
+
+ /// Does the type \p ty need to be converted?
+ /// Any type that is a `!fir.boxproc` in whole or in part will need to be
+ /// converted to a function type to lower the IR to function pointer form in
+ /// the default implementation performed in this pass. Other implementations
+ /// are possible, so those may convert `!fir.boxproc` to some other type or
+ /// not at all depending on the implementation target's characteristics and
+ /// preference.
+ bool needsConversion(mlir::Type ty) {
+ if (ty.isa<BoxProcType>())
+ return true;
+ if (auto funcTy = ty.dyn_cast<mlir::FunctionType>()) {
+ for (auto t : funcTy.getInputs())
+ if (needsConversion(t))
+ return true;
+ for (auto t : funcTy.getResults())
+ if (needsConversion(t))
+ return true;
+ return false;
+ }
+ if (auto tupleTy = ty.dyn_cast<mlir::TupleType>()) {
+ for (auto t : tupleTy.getTypes())
+ if (needsConversion(t))
+ return true;
+ return false;
+ }
+ if (auto recTy = ty.dyn_cast<RecordType>()) {
+ bool result = false;
+ visitedTypes.push_back(recTy);
+ for (auto t : recTy.getTypeList()) {
+ if (llvm::any_of(visitedTypes,
+ [&](mlir::Type rt) { return rt == recTy; }))
+ continue;
+ if (needsConversion(t.second)) {
+ result = true;
+ break;
+ }
+ }
+ visitedTypes.pop_back();
+ return result;
+ }
+ if (auto boxTy = ty.dyn_cast<BoxType>())
+ return needsConversion(boxTy.getEleTy());
+ if (isa_ref_type(ty))
+ return needsConversion(unwrapRefType(ty));
+ if (auto t = ty.dyn_cast<SequenceType>())
+ return needsConversion(unwrapSequenceType(ty));
+ return false;
+ }
+
+ BoxprocTypeRewriter() {
+ addConversion([](mlir::Type ty) { return ty; });
+ addConversion([](BoxProcType boxproc) { return boxproc.getEleTy(); });
+ addConversion([&](mlir::TupleType tupTy) {
+ llvm::SmallVector<mlir::Type> memTys;
+ for (auto ty : tupTy.getTypes())
+ memTys.push_back(convertType(ty));
+ return mlir::TupleType::get(tupTy.getContext(), memTys);
+ });
+ addConversion([&](mlir::FunctionType funcTy) {
+ llvm::SmallVector<mlir::Type> inTys;
+ llvm::SmallVector<mlir::Type> resTys;
+ for (auto ty : funcTy.getInputs())
+ inTys.push_back(convertType(ty));
+ for (auto ty : funcTy.getResults())
+ resTys.push_back(convertType(ty));
+ return mlir::FunctionType::get(funcTy.getContext(), inTys, resTys);
+ });
+ addConversion([&](ReferenceType ty) {
+ return ReferenceType::get(convertType(ty.getEleTy()));
+ });
+ addConversion([&](PointerType ty) {
+ return PointerType::get(convertType(ty.getEleTy()));
+ });
+ addConversion(
+ [&](HeapType ty) { return HeapType::get(convertType(ty.getEleTy())); });
+ addConversion(
+ [&](BoxType ty) { return BoxType::get(convertType(ty.getEleTy())); });
+ addConversion([&](SequenceType ty) {
+ // TODO: add ty.getLayoutMap() as needed.
+ return SequenceType::get(ty.getShape(), convertType(ty.getEleTy()));
+ });
+ addConversion([&](RecordType ty) {
+ // FIR record types can have recursive references, so conversion is a bit
+ // more complex than the other types. This conversion is not needed
+ // presently, so just emit a TODO message. Need to consider the uniqued
+ // name of the record, etc.
+ fir::emitFatalError(
+ mlir::UnknownLoc::get(ty.getContext()),
+ "not yet implemented: record type with a boxproc type");
+ return RecordType::get(ty.getContext(), "*fixme*");
+ });
+ addArgumentMaterialization(materializeProcedure);
+ addSourceMaterialization(materializeProcedure);
+ addTargetMaterialization(materializeProcedure);
+ }
+
+ static mlir::Value materializeProcedure(mlir::OpBuilder &builder,
+ BoxProcType type,
+ mlir::ValueRange inputs,
+ mlir::Location loc) {
+ assert(inputs.size() == 1);
+ return builder.create<ConvertOp>(loc, unwrapRefType(type.getEleTy()),
+ inputs[0]);
+ }
+
+private:
+ llvm::SmallVector<mlir::Type> visitedTypes;
+};
+
+/// A `boxproc` is an abstraction for a Fortran procedure reference. Typically,
+/// Fortran procedures can be referenced directly through a function pointer.
+/// However, Fortran has one-level dynamic scoping between a host procedure and
+/// its internal procedures. This allows internal procedures to directly access
+/// and modify the state of the host procedure's variables.
+///
+/// There are any number of possible implementations possible.
+///
+/// The implementation used here is to convert `boxproc` values to function
+/// pointers everywhere. If a `boxproc` value includes a frame pointer to the
+/// host procedure's data, then a thunk will be created at runtime to capture
+/// the frame pointer during execution. In LLVM IR, the frame pointer is
+/// designated with the `nest` attribute. The thunk's address will then be used
+/// as the call target instead of the original function's address directly.
+class BoxedProcedurePass : public BoxedProcedurePassBase<BoxedProcedurePass> {
+public:
+ BoxedProcedurePass() { options = {true}; }
+ BoxedProcedurePass(bool useThunks) { options = {useThunks}; }
+
+ inline mlir::ModuleOp getModule() { return getOperation(); }
+
+ void runOnOperation() override final {
+ if (options.useThunks) {
+ auto *context = &getContext();
+ mlir::IRRewriter rewriter(context);
+ BoxprocTypeRewriter typeConverter;
+ mlir::Dialect *firDialect = context->getLoadedDialect("fir");
+ getModule().walk([&](mlir::Operation *op) {
+ if (auto addr = mlir::dyn_cast<BoxAddrOp>(op)) {
+ auto ty = addr.getVal().getType();
+ if (typeConverter.needsConversion(ty) ||
+ ty.isa<mlir::FunctionType>()) {
+ // Rewrite all `fir.box_addr` ops on values of type `!fir.boxproc`
+ // or function type to be `fir.convert` ops.
+ rewriter.setInsertionPoint(addr);
+ rewriter.replaceOpWithNewOp<ConvertOp>(
+ addr, typeConverter.convertType(addr.getType()), addr.getVal());
+ }
+ } else if (auto func = mlir::dyn_cast<mlir::FuncOp>(op)) {
+ mlir::FunctionType ty = func.getFunctionType();
+ if (typeConverter.needsConversion(ty)) {
+ rewriter.startRootUpdate(func);
+ auto toTy =
+ typeConverter.convertType(ty).cast<mlir::FunctionType>();
+ if (!func.empty())
+ for (auto e : llvm::enumerate(toTy.getInputs())) {
+ unsigned i = e.index();
+ auto &block = func.front();
+ block.insertArgument(i, e.value(), func.getLoc());
+ block.getArgument(i + 1).replaceAllUsesWith(
+ block.getArgument(i));
+ block.eraseArgument(i + 1);
+ }
+ func.setType(toTy);
+ rewriter.finalizeRootUpdate(func);
+ }
+ } else if (auto embox = mlir::dyn_cast<EmboxProcOp>(op)) {
+ // Rewrite all `fir.emboxproc` ops to either `fir.convert` or a thunk
+ // as required.
+ mlir::Type toTy = embox.getType().cast<BoxProcType>().getEleTy();
+ rewriter.setInsertionPoint(embox);
+ if (embox.getHost()) {
+ // Create the thunk.
+ auto module = embox->getParentOfType<mlir::ModuleOp>();
+ FirOpBuilder builder(rewriter, getKindMapping(module));
+ auto loc = embox.getLoc();
+ mlir::Type i8Ty = builder.getI8Type();
+ mlir::Type i8Ptr = builder.getRefType(i8Ty);
+ mlir::Type buffTy = SequenceType::get({32}, i8Ty);
+ auto buffer = builder.create<AllocaOp>(loc, buffTy);
+ mlir::Value closure =
+ builder.createConvert(loc, i8Ptr, embox.getHost());
+ mlir::Value tramp = builder.createConvert(loc, i8Ptr, buffer);
+ mlir::Value func =
+ builder.createConvert(loc, i8Ptr, embox.getFunc());
+ builder.create<fir::CallOp>(
+ loc, factory::getLlvmInitTrampoline(builder),
+ llvm::ArrayRef<mlir::Value>{tramp, func, closure});
+ auto adjustCall = builder.create<fir::CallOp>(
+ loc, factory::getLlvmAdjustTrampoline(builder),
+ llvm::ArrayRef<mlir::Value>{tramp});
+ rewriter.replaceOpWithNewOp<ConvertOp>(embox, toTy,
+ adjustCall.getResult(0));
+ } else {
+ // Just forward the function as a pointer.
+ rewriter.replaceOpWithNewOp<ConvertOp>(embox, toTy,
+ embox.getFunc());
+ }
+ } else if (auto mem = mlir::dyn_cast<AllocaOp>(op)) {
+ auto ty = mem.getType();
+ if (typeConverter.needsConversion(ty)) {
+ rewriter.setInsertionPoint(mem);
+ auto toTy = typeConverter.convertType(unwrapRefType(ty));
+ bool isPinned = mem.getPinned();
+ llvm::StringRef uniqName;
+ if (mem.getUniqName().hasValue())
+ uniqName = mem.getUniqName().getValue();
+ llvm::StringRef bindcName;
+ if (mem.getBindcName().hasValue())
+ bindcName = mem.getBindcName().getValue();
+ rewriter.replaceOpWithNewOp<AllocaOp>(
+ mem, toTy, uniqName, bindcName, isPinned, mem.getTypeparams(),
+ mem.getShape());
+ }
+ } else if (auto mem = mlir::dyn_cast<AllocMemOp>(op)) {
+ auto ty = mem.getType();
+ if (typeConverter.needsConversion(ty)) {
+ rewriter.setInsertionPoint(mem);
+ auto toTy = typeConverter.convertType(unwrapRefType(ty));
+ llvm::StringRef uniqName;
+ if (mem.getUniqName().hasValue())
+ uniqName = mem.getUniqName().getValue();
+ llvm::StringRef bindcName;
+ if (mem.getBindcName().hasValue())
+ bindcName = mem.getBindcName().getValue();
+ rewriter.replaceOpWithNewOp<AllocMemOp>(
+ mem, toTy, uniqName, bindcName, mem.getTypeparams(),
+ mem.getShape());
+ }
+ } else if (auto coor = mlir::dyn_cast<CoordinateOp>(op)) {
+ auto ty = coor.getType();
+ mlir::Type baseTy = coor.getBaseType();
+ if (typeConverter.needsConversion(ty) ||
+ typeConverter.needsConversion(baseTy)) {
+ rewriter.setInsertionPoint(coor);
+ auto toTy = typeConverter.convertType(ty);
+ auto toBaseTy = typeConverter.convertType(baseTy);
+ rewriter.replaceOpWithNewOp<CoordinateOp>(coor, toTy, coor.getRef(),
+ coor.getCoor(), toBaseTy);
+ }
+ } else if (auto index = mlir::dyn_cast<FieldIndexOp>(op)) {
+ auto ty = index.getType();
+ mlir::Type onTy = index.getOnType();
+ if (typeConverter.needsConversion(ty) ||
+ typeConverter.needsConversion(onTy)) {
+ rewriter.setInsertionPoint(index);
+ auto toTy = typeConverter.convertType(ty);
+ auto toOnTy = typeConverter.convertType(onTy);
+ rewriter.replaceOpWithNewOp<FieldIndexOp>(
+ index, toTy, index.getFieldId(), toOnTy, index.getTypeparams());
+ }
+ } else if (auto index = mlir::dyn_cast<LenParamIndexOp>(op)) {
+ auto ty = index.getType();
+ mlir::Type onTy = index.getOnType();
+ if (typeConverter.needsConversion(ty) ||
+ typeConverter.needsConversion(onTy)) {
+ rewriter.setInsertionPoint(index);
+ auto toTy = typeConverter.convertType(ty);
+ auto toOnTy = typeConverter.convertType(onTy);
+ rewriter.replaceOpWithNewOp<LenParamIndexOp>(
+ mem, toTy, index.getFieldId(), toOnTy);
+ }
+ } else if (op->getDialect() == firDialect) {
+ rewriter.startRootUpdate(op);
+ for (auto i : llvm::enumerate(op->getResultTypes()))
+ if (typeConverter.needsConversion(i.value())) {
+ auto toTy = typeConverter.convertType(i.value());
+ op->getResult(i.index()).setType(toTy);
+ }
+ rewriter.finalizeRootUpdate(op);
+ }
+ });
+ }
+ // TODO: any alternative implementation. Note: currently, the default code
+ // gen will not be able to handle boxproc and will give an error.
+ }
+
+private:
+ BoxedProcedureOptions options;
+};
+} // namespace
+
/// Create the boxed-procedure lowering pass with thunk generation enabled
/// (the default-constructed pass sets useThunks = true).
std::unique_ptr<mlir::Pass> fir::createBoxedProcedurePass() {
  return std::make_unique<BoxedProcedurePass>();
}
+
/// Create the boxed-procedure lowering pass, explicitly selecting whether the
/// thunk-based lowering is performed (\p useThunks).
std::unique_ptr<mlir::Pass> fir::createBoxedProcedurePass(bool useThunks) {
  return std::make_unique<BoxedProcedurePass>(useThunks);
}
add_flang_library(FIRCodeGen
+ BoxedProcedure.cpp
CGOps.cpp
CodeGen.cpp
PreCGRewrite.cpp
#include "Target.h"
#include "flang/Lower/Todo.h"
#include "flang/Optimizer/Builder/Character.h"
+#include "flang/Optimizer/Builder/FIRBuilder.h"
#include "flang/Optimizer/CodeGen/CodeGen.h"
#include "flang/Optimizer/Dialect/FIRDialect.h"
#include "flang/Optimizer/Dialect/FIROps.h"
if (!forcedTargetTriple.empty())
setTargetTriple(mod, forcedTargetTriple);
- auto specifics = CodeGenSpecifics::get(getOperation().getContext(),
- getTargetTriple(getOperation()),
- getKindMapping(getOperation()));
+ auto specifics = CodeGenSpecifics::get(
+ mod.getContext(), getTargetTriple(mod), getKindMapping(mod));
setMembers(specifics.get(), &rewriter);
// Perform type conversion on signatures and call sites.
rewriteCallComplexInputType(cmplx, oper, newInTys, newOpers);
})
.template Case<mlir::TupleType>([&](mlir::TupleType tuple) {
- if (factory::isCharacterProcedureTuple(tuple)) {
+ if (isCharacterProcedureTuple(tuple)) {
mlir::ModuleOp module = getModule();
if constexpr (std::is_same_v<std::decay_t<A>, fir::CallOp>) {
if (callOp.getCallee()) {
llvm::StringRef charProcAttr =
- fir::getCharacterProcedureDummyAttrName();
+ getCharacterProcedureDummyAttrName();
// The charProcAttr attribute is only used as a safety to
// confirm that this is a dummy procedure and should be split.
// It cannot be used to match because attributes are not
lowerComplexSignatureArg(ty, newInTys);
})
.Case<mlir::TupleType>([&](mlir::TupleType tuple) {
- if (factory::isCharacterProcedureTuple(tuple)) {
+ if (isCharacterProcedureTuple(tuple)) {
newInTys.push_back(tuple.getType(0));
trailingInTys.push_back(tuple.getType(1));
} else {
return false;
}
for (auto ty : func.getInputs())
- if (((ty.isa<BoxCharType>() || factory::isCharacterProcedureTuple(ty)) &&
+ if (((ty.isa<BoxCharType>() || isCharacterProcedureTuple(ty)) &&
!noCharacterConversion) ||
(isa_complex(ty) && !noComplexConversion)) {
LLVM_DEBUG(llvm::dbgs() << "rewrite " << signature << " for target\n");
return true;
}
+  /// Report whether any formal argument of \p func carries the host
+  /// association attribute. Such an argument may need special target
+  /// specific rewriting.
+  static bool hasHostAssociations(mlir::FuncOp func) {
+    const std::size_t argCount = func.getFunctionType().getInputs().size();
+    for (std::size_t argNo = 0; argNo != argCount; ++argNo) {
+      if (func.getArgAttrOfType<mlir::UnitAttr>(argNo, getHostAssocAttrName()))
+        return true;
+    }
+    return false;
+  }
+
/// Rewrite the signatures and body of the `FuncOp`s in the module for
/// the immediately subsequent target code gen.
void convertSignature(mlir::FuncOp func) {
auto funcTy = func.getFunctionType().cast<mlir::FunctionType>();
- if (hasPortableSignature(funcTy))
+ if (hasPortableSignature(funcTy) && !hasHostAssociations(func))
return;
llvm::SmallVector<mlir::Type> newResTys;
llvm::SmallVector<mlir::Type> newInTys;
doComplexArg(func, cmplx, newInTys, fixups);
})
.Case<mlir::TupleType>([&](mlir::TupleType tuple) {
- if (factory::isCharacterProcedureTuple(tuple)) {
+ if (isCharacterProcedureTuple(tuple)) {
fixups.emplace_back(FixupTy::Codes::TrailingCharProc,
newInTys.size(), trailingTys.size());
newInTys.push_back(tuple.getType(0));
}
})
.Default([&](mlir::Type ty) { newInTys.push_back(ty); });
+ if (func.getArgAttrOfType<mlir::UnitAttr>(index,
+ getHostAssocAttrName())) {
+ func.setArgAttr(index, "llvm.nest", rewriter->getUnitAttr());
+ }
}
if (!func.empty()) {
func.front().eraseArgument(fixup.index + 1);
} break;
case FixupTy::Codes::TrailingCharProc: {
- // The FIR character procedure argument tuple has been split into a
+ // The FIR character procedure argument tuple must be split into a
// pair of distinct arguments. The first part of the pair appears in
// the original argument position. The second part of the pair is
// appended after all the original arguments.
.getElementType();
}
+  // fir.boxproc<any> --> llvm<"{ any*, i8* }">
+  /// Lower a !fir.boxproc type to an unpacked literal LLVM struct pairing
+  /// the converted procedure type with an i8* slot.
+  mlir::Type convertBoxProcType(BoxProcType boxproc) {
+    // Convert the boxed procedure's element type first.
+    auto funcTy = convertType(boxproc.getEleTy());
+    auto i8PtrTy = mlir::LLVM::LLVMPointerType::get(
+        mlir::IntegerType::get(&getContext(), 8));
+    // Literal (non-packed) struct: { funcTy, i8* }.
+    llvm::SmallVector<mlir::Type, 2> tuple = {funcTy, i8PtrTy};
+    return mlir::LLVM::LLVMStructType::getLiteral(&getContext(), tuple,
+                                                  /*isPacked=*/false);
+  }
+
unsigned characterBitsize(fir::CharacterType charTy) {
return kindMapping.getCharacterBitsize(charTy.getFKind());
}
// EmboxProcOp
//===----------------------------------------------------------------------===//
-mlir::ParseResult EmboxProcOp::parse(mlir::OpAsmParser &parser,
- mlir::OperationState &result) {
- mlir::SymbolRefAttr procRef;
- if (parser.parseAttribute(procRef, "funcname", result.attributes))
- return mlir::failure();
- bool hasTuple = false;
- mlir::OpAsmParser::UnresolvedOperand tupleRef;
- if (!parser.parseOptionalComma()) {
- if (parser.parseOperand(tupleRef))
- return mlir::failure();
- hasTuple = true;
- }
- mlir::FunctionType type;
- if (parser.parseColon() || parser.parseLParen() || parser.parseType(type))
- return mlir::failure();
- result.addAttribute("functype", mlir::TypeAttr::get(type));
- if (hasTuple) {
- mlir::Type tupleType;
- if (parser.parseComma() || parser.parseType(tupleType) ||
- parser.resolveOperand(tupleRef, tupleType, result.operands))
- return mlir::failure();
- }
- mlir::Type boxType;
- if (parser.parseRParen() || parser.parseArrow() ||
- parser.parseType(boxType) || parser.addTypesToList(boxType, result.types))
- return mlir::failure();
- return mlir::success();
-}
-
-void EmboxProcOp::print(mlir::OpAsmPrinter &p) {
- p << ' ' << getOperation()->getAttr("funcname");
- auto h = getHost();
- if (h) {
- p << ", ";
- p.printOperand(h);
- }
- p << " : (" << getOperation()->getAttr("functype");
- if (h)
- p << ", " << h.getType();
- p << ") -> " << getType();
-}
-
mlir::LogicalResult EmboxProcOp::verify() {
  // host bindings (optional) must be a reference to a tuple
  if (auto h = getHost()) {
-    if (auto r = h.getType().dyn_cast<ReferenceType>()) {
-      if (!r.getEleTy().dyn_cast<mlir::TupleType>())
-        return mlir::failure();
-    } else {
-      return mlir::failure();
-    }
+    // When present, the host binding must be a !fir.ref whose element type
+    // is a tuple; any other type is rejected.
+    if (auto r = h.getType().dyn_cast<ReferenceType>())
+      if (r.getEleTy().dyn_cast<mlir::TupleType>())
+        return mlir::success();
+    return mlir::failure();
  }
  return mlir::success();
}
}
};
-struct MangleNameOnEmboxProcOp
- : public mlir::OpRewritePattern<fir::EmboxProcOp> {
-public:
- using OpRewritePattern::OpRewritePattern;
-
- mlir::LogicalResult
- matchAndRewrite(fir::EmboxProcOp op,
- mlir::PatternRewriter &rewriter) const override {
- rewriter.startRootUpdate(op);
- auto result = fir::NameUniquer::deconstruct(
- op.getFuncname().getRootReference().getValue());
- if (fir::NameUniquer::isExternalFacingUniquedName(result))
- op.setFuncnameAttr(
- SymbolRefAttr::get(op.getContext(), mangleExternalName(result)));
- rewriter.finalizeRootUpdate(op);
- return success();
- }
-};
-
class ExternalNameConversionPass
: public fir::ExternalNameConversionBase<ExternalNameConversionPass> {
public:
mlir::RewritePatternSet patterns(context);
patterns.insert<MangleNameOnCallOp, MangleNameOnCallOp, MangleNameOnFuncOp,
- MangleNameForCommonBlock, MangleNameOnAddrOfOp,
- MangleNameOnEmboxProcOp>(context);
+ MangleNameForCommonBlock, MangleNameOnAddrOfOp>(context);
ConversionTarget target(*context);
target.addLegalDialect<fir::FIROpsDialect, LLVM::LLVMDialect,
op.getSymbol().getRootReference().getValue());
});
- target.addDynamicallyLegalOp<fir::EmboxProcOp>([](fir::EmboxProcOp op) {
- return !fir::NameUniquer::needExternalNameMangling(
- op.getFuncname().getRootReference().getValue());
- });
-
if (failed(applyPartialConversion(op, target, std::move(patterns))))
signalPassFailure();
}
+++ /dev/null
-// RUN: %not_todo_cmd fir-opt --fir-to-llvm-ir="target=x86_64-unknown-linux-gnu" %s 2>&1 | FileCheck %s
-
-// Test `fir.emboxproc` conversion to llvm.
-// Not implemented yet.
-
-func @emboxproc_test() {
- %host_vars = fir.alloca tuple<i32,f64>
-// CHECK: not yet implemented fir.emboxproc codegen
- %bproc = fir.emboxproc @method_impl, %host_vars : ((i32) -> (), !fir.ref<tuple<i32,f64>>) -> !fir.boxproc<(i32) -> ()>
- return
-}
func @_QPfoo() {
%e6 = fir.alloca tuple<i32,f64>
- %0 = fir.emboxproc @_QPfoo_impl, %e6 : ((!fir.box<!fir.type<derived3{f:f32}>>) -> (), !fir.ref<tuple<i32,f64>>) -> !fir.boxproc<(!fir.box<!fir.type<derived3{f:f32}>>) -> ()>
+ %ao = fir.address_of(@_QPfoo_impl) : (!fir.box<!fir.type<derived3{f:f32}>>) -> ()
+ %0 = fir.emboxproc %ao, %e6 : ((!fir.box<!fir.type<derived3{f:f32}>>) -> (), !fir.ref<tuple<i32,f64>>) -> !fir.boxproc<(!fir.box<!fir.type<derived3{f:f32}>>) -> ()>
return
}
func private @_QPfoo_impl(!fir.ref<i32>)
-// CHECK: %{{.*}}= fir.emboxproc @foo_impl_
+// CHECK: fir.address_of(@foo_impl_)
%6 = fir.embox %5 : (!fir.heap<!fir.array<100xf32>>) -> !fir.box<!fir.array<100xf32>>
// CHECK: [[VAL_7:%.*]] = fir.box_addr [[VAL_6]] : (!fir.box<!fir.array<100xf32>>) -> !fir.ref<!fir.array<100xf32>>
+ %7 = fir.box_addr %6 : (!fir.box<!fir.array<100xf32>>) -> !fir.ref<!fir.array<100xf32>>
+// CHECK: %[[WAL_2:.*]] = fir.undefined !fir.boxproc<() -> ()>
+ %ba1 = fir.undefined !fir.boxproc<() -> ()>
+// CHECK: %{{.*}} = fir.box_addr %[[WAL_2]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+ %ba2 = fir.box_addr %ba1 : (!fir.boxproc<() -> ()>) -> (() -> ())
+ %ba3 = fir.undefined !fir.boxchar<1>
+// CHECK: %{{.*}} = fir.box_addr %{{.*}} : (!fir.boxchar<1>) -> !fir.ref<!fir.char<1>>
+ %ba4 = fir.box_addr %ba3 : (!fir.boxchar<1>) -> !fir.ref<!fir.char<1>>
+ %c0 = arith.constant 0 : index
+ %d1:3 = fir.box_dims %6, %c0 : (!fir.box<!fir.array<100xf32>>, index) -> (index, index, index)
// CHECK: [[VAL_8:%.*]] = arith.constant 0 : index
// CHECK: [[VAL_9:%.*]]:3 = fir.box_dims [[VAL_6]], [[VAL_8]] : (!fir.box<!fir.array<100xf32>>, index) -> (index, index, index)
// CHECK: fir.call @print_index3([[VAL_9]]#0, [[VAL_9]]#1, [[VAL_9]]#2) : (index, index, index) -> ()
// CHECK: [[VAL_10:%.*]] = fir.call @it1() : () -> !fir.int<4>
- %7 = fir.box_addr %6 : (!fir.box<!fir.array<100xf32>>) -> !fir.ref<!fir.array<100xf32>>
- %c0 = arith.constant 0 : index
- %d1:3 = fir.box_dims %6, %c0 : (!fir.box<!fir.array<100xf32>>, index) -> (index, index, index)
fir.call @print_index3(%d1#0, %d1#1, %d1#2) : (index, index, index) -> ()
%8 = fir.call @it1() : () -> !fir.int<4>
// CHECK: [[VAL_53:%.*]] = arith.constant 4.213000e+01 : f64
// CHECK: [[VAL_54:%.*]] = fir.insert_value [[VAL_48]], [[VAL_53]], [1 : i32] : (!fir.type<qq2{f1:i32,f2:f64}>, f64) -> !fir.type<qq2{f1:i32,f2:f64}>
// CHECK: fir.store [[VAL_54]] to [[VAL_39]] : !fir.ref<!fir.type<qq2{f1:i32,f2:f64}>>
-// CHECK: [[VAL_55:%.*]] = fir.emboxproc @method_impl, [[VAL_41]] : ((!fir.box<!fir.type<derived3{f:f32}>>) -> (), !fir.ref<tuple<i32, f64>>) -> !fir.boxproc<(!fir.box<!fir.type<derived3{f:f32}>>) -> ()>
+// CHECK: %[[WAL_1:.*]] = fir.address_of(@method_impl)
+// CHECK: [[VAL_55:%.*]] = fir.emboxproc %[[WAL_1]], [[VAL_41]] : ((!fir.box<!fir.type<derived3{f:f32}>>) -> (), !fir.ref<tuple<i32, f64>>) -> !fir.boxproc<(!fir.box<!fir.type<derived3{f:f32}>>) -> ()>
// CHECK: [[VAL_56:%.*]], [[VAL_57:%.*]] = fir.unboxproc [[VAL_55]] : (!fir.boxproc<(!fir.box<!fir.type<derived3{f:f32}>>) -> ()>) -> ((!fir.box<!fir.type<derived3{f:f32}>>) -> (), !fir.ref<tuple<!fir.type<qq2{f1:i32,f2:f64}>>>)
// CHECK: [[VAL_58:%.*]] = fir.call @box2() : () -> !fir.boxproc<(i32, i32) -> i64>
// CHECK: [[VAL_59:%.*]], [[VAL_60:%.*]] = fir.unboxproc [[VAL_58]] : (!fir.boxproc<(i32, i32) -> i64>) -> ((i32, i32) -> i64, !fir.ref<tuple<!fir.type<qq1{f1:i32}>>>)
%c42 = arith.constant 42.13 : f64
%a3 = fir.insert_value %6, %c42, [1 : i32] : (!fir.type<qq2{f1:i32,f2:f64}>, f64) -> !fir.type<qq2{f1:i32,f2:f64}>
fir.store %a3 to %d6 : !fir.ref<!fir.type<qq2{f1:i32,f2:f64}>>
- %7 = fir.emboxproc @method_impl, %e6 : ((!fir.box<!fir.type<derived3{f:f32}>>) -> (), !fir.ref<tuple<i32,f64>>) -> !fir.boxproc<(!fir.box<!fir.type<derived3{f:f32}>>) -> ()>
+ %mi = fir.address_of(@method_impl) : (!fir.box<!fir.type<derived3{f:f32}>>) -> ()
+ %7 = fir.emboxproc %mi, %e6 : ((!fir.box<!fir.type<derived3{f:f32}>>) -> (), !fir.ref<tuple<i32,f64>>) -> !fir.boxproc<(!fir.box<!fir.type<derived3{f:f32}>>) -> ()>
%8:2 = fir.unboxproc %7 : (!fir.boxproc<(!fir.box<!fir.type<derived3{f:f32}>>) -> ()>) -> ((!fir.box<!fir.type<derived3{f:f32}>>) -> (), !fir.ref<tuple<!fir.type<qq2{f1:i32,f2:f64}>>>)
%9 = fir.call @box2() : () -> !fir.boxproc<(i32, i32) -> i64>
%10:2 = fir.unboxproc %9 : (!fir.boxproc<(i32, i32) -> i64>) -> ((i32, i32) -> i64, !fir.ref<tuple<!fir.type<qq1{f1:i32}>>>)
! CHECK-LABEL: len_test
subroutine len_test(i, c)
- integer :: i
- character(*) :: c
- ! CHECK: %[[c:.*]]:2 = fir.unboxchar %arg1
- ! CHECK: %[[xx:.*]] = fir.convert %[[c]]#1 : (index) -> i32
- ! CHECK: fir.store %[[xx]] to %arg0
- i = len(c)
- end subroutine
-
- ! CHECK-LABEL: len_test_array
- ! CHECK-SAME: %[[arg0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"}, %[[arg1:.*]]: !fir.boxchar<1> {fir.bindc_name = "c"}
- subroutine len_test_array(i, c)
- integer :: i
- character(*) :: c(100)
- ! CHECK: %[[c:.*]]:2 = fir.unboxchar %[[arg1]]
- ! CHECK: %[[xx:.*]] = fir.convert %[[c]]#1 : (index) -> i32
- ! CHECK: fir.store %[[xx]] to %[[arg0]]
- i = len(c)
- end subroutine
-
- ! CHECK-LABEL: func @_QPlen_test_assumed_shape_array(
- ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"},
- ! CHECK-SAME: %[[VAL_1:.*]]: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_name = "c"}) {
- subroutine len_test_assumed_shape_array(i, c)
- integer :: i
- character(*) :: c(:)
- ! CHECK: %[[VAL_2:.*]] = fir.box_elesize %[[VAL_1]] : (!fir.box<!fir.array<?x!fir.char<1,?>>>) -> index
- ! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (index) -> i32
- ! CHECK: fir.store %[[VAL_3]] to %[[VAL_0]] : !fir.ref<i32>
- i = len(c)
- end subroutine
-
- ! CHECK-LABEL: func @_QPlen_test_array_alloc(
- ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"},
- ! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>> {fir.bindc_name = "c"}) {
- subroutine len_test_array_alloc(i, c)
- integer :: i
- character(:), allocatable :: c(:)
- ! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
- ! CHECK: %[[VAL_3:.*]] = fir.box_elesize %[[VAL_2]] : (!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>) -> index
- ! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (index) -> i32
- ! CHECK: fir.store %[[VAL_4]] to %[[VAL_0]] : !fir.ref<i32>
- i = len(c)
- end subroutine
-
- ! CHECK-LABEL: func @_QPlen_test_array_local_alloc(
- ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"})
- subroutine len_test_array_local_alloc(i)
- integer :: i
- character(:), allocatable :: c(:)
- ! CHECK: %[[VAL_5:.*]] = fir.alloca index {uniq_name = "_QFlen_test_array_local_allocEc.len"}
- ! CHECK: %[[VAL_7:.*]] = arith.constant 10 : i32
- ! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_7]] : (i32) -> index
- ! CHECK: fir.store %[[VAL_10]] to %[[VAL_5]] : !fir.ref<index>
- allocate(character(10):: c(100))
- ! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_5]] : !fir.ref<index>
- ! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (index) -> i32
- ! CHECK: fir.store %[[VAL_14]] to %[[VAL_0]] : !fir.ref<i32>
- i = len(c)
- end subroutine
-
- ! CHECK-LABEL: func @_QPlen_test_alloc_explicit_len(
- ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"},
- ! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32> {fir.bindc_name = "n"},
- ! CHECK-SAME: %[[VAL_2:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>> {fir.bindc_name = "c"}) {
- subroutine len_test_alloc_explicit_len(i, n, c)
- integer :: i
- integer :: n
- character(n), allocatable :: c(:)
- ! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
- ! CHECK: fir.store %[[VAL_3]] to %[[VAL_0]] : !fir.ref<i32>
- i = len(c)
- end subroutine
+ integer :: i
+ character(*) :: c
+ ! CHECK: %[[c:.*]]:2 = fir.unboxchar %arg1
+ ! CHECK: %[[xx:.*]] = fir.convert %[[c]]#1 : (index) -> i32
+ ! CHECK: fir.store %[[xx]] to %arg0
+ i = len(c)
+end subroutine
+
+! CHECK-LABEL: len_test_array
+! CHECK-SAME: %[[arg0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"}, %[[arg1:.*]]: !fir.boxchar<1> {fir.bindc_name = "c"}
+subroutine len_test_array(i, c)
+ integer :: i
+ character(*) :: c(100)
+ ! CHECK: %[[c:.*]]:2 = fir.unboxchar %[[arg1]]
+ ! CHECK: %[[xx:.*]] = fir.convert %[[c]]#1 : (index) -> i32
+ ! CHECK: fir.store %[[xx]] to %[[arg0]]
+ i = len(c)
+end subroutine
+
+! CHECK-LABEL: func @_QPlen_test_assumed_shape_array(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.box<!fir.array<?x!fir.char<1,?>>> {fir.bindc_name = "c"}) {
+subroutine len_test_assumed_shape_array(i, c)
+ integer :: i
+ character(*) :: c(:)
+! CHECK: %[[VAL_2:.*]] = fir.box_elesize %[[VAL_1]] : (!fir.box<!fir.array<?x!fir.char<1,?>>>) -> index
+! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (index) -> i32
+! CHECK: fir.store %[[VAL_3]] to %[[VAL_0]] : !fir.ref<i32>
+ i = len(c)
+end subroutine
+
+! CHECK-LABEL: func @_QPlen_test_array_alloc(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>> {fir.bindc_name = "c"}) {
+subroutine len_test_array_alloc(i, c)
+ integer :: i
+ character(:), allocatable :: c(:)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
+! CHECK: %[[VAL_3:.*]] = fir.box_elesize %[[VAL_2]] : (!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>) -> index
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (index) -> i32
+! CHECK: fir.store %[[VAL_4]] to %[[VAL_0]] : !fir.ref<i32>
+ i = len(c)
+end subroutine
+
+! CHECK-LABEL: func @_QPlen_test_array_local_alloc(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"})
+subroutine len_test_array_local_alloc(i)
+ integer :: i
+ character(:), allocatable :: c(:)
+! CHECK: %[[VAL_5:.*]] = fir.alloca index {uniq_name = "_QFlen_test_array_local_allocEc.len"}
+! CHECK: %[[VAL_7:.*]] = arith.constant 10 : i32
+! CHECK: %[[VAL_10:.*]] = fir.convert %[[VAL_7]] : (i32) -> index
+! CHECK: fir.store %[[VAL_10]] to %[[VAL_5]] : !fir.ref<index>
+ allocate(character(10):: c(100))
+! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_5]] : !fir.ref<index>
+! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (index) -> i32
+! CHECK: fir.store %[[VAL_14]] to %[[VAL_0]] : !fir.ref<i32>
+ i = len(c)
+end subroutine
+
+! CHECK-LABEL: func @_QPlen_test_alloc_explicit_len(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32> {fir.bindc_name = "n"},
+! CHECK-SAME: %[[VAL_2:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>> {fir.bindc_name = "c"}) {
+subroutine len_test_alloc_explicit_len(i, n, c)
+ integer :: i
+ integer :: n
+ character(n), allocatable :: c(:)
+! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+! CHECK: %[[cmp:.*]] = arith.cmpi sgt, %[[VAL_3]], %[[c0_i32]] : i32
+! CHECK: %[[len:.*]] = arith.select %[[cmp]], %[[VAL_3]], %[[c0_i32]] : i32
+! CHECK: fir.store %[[len]] to %[[VAL_0]] : !fir.ref<i32>
+ i = len(c)
+end subroutine
+
+! CHECK-LABEL: func @_QPlen_test_pointer_explicit_len(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32> {fir.bindc_name = "n"},
+subroutine len_test_pointer_explicit_len(i, n, c)
+ integer :: i
+ integer :: n
+ character(n), pointer :: c(:)
+! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+! CHECK: %[[cmp:.*]] = arith.cmpi sgt, %[[VAL_3]], %[[c0_i32]] : i32
+! CHECK: %[[len:.*]] = arith.select %[[cmp]], %[[VAL_3]], %[[c0_i32]] : i32
+! CHECK: fir.store %[[len]] to %[[VAL_0]] : !fir.ref<i32>
+ i = len(c)
+end subroutine
+
+! CHECK-LABEL: func @_QPlen_test_assumed_shape_explicit_len(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "i"},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32> {fir.bindc_name = "n"},
+subroutine len_test_assumed_shape_explicit_len(i, n, c)
+ integer :: i
+ integer :: n
+ character(n) :: c(:)
+! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+! CHECK: %[[cmp:.*]] = arith.cmpi sgt, %[[VAL_3]], %[[c0_i32]] : i32
+! CHECK: %[[len:.*]] = arith.select %[[cmp]], %[[VAL_3]], %[[c0_i32]] : i32
+! CHECK: fir.store %[[len]] to %[[VAL_0]] : !fir.ref<i32>
+ i = len(c)
+end subroutine
! RUN: bbc -emit-fir %s -o - | FileCheck %s
module alloc_assign
+ type t
+ integer :: i
+ end type
contains
! -----------------------------------------------------------------------------
subroutine test_dyn_char_scalar(x, n)
integer :: n
character(n), allocatable :: x
-! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[VAL_2A:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_2B:.*]] = arith.cmpi sgt, %[[VAL_2A]], %[[c0_i32]] : i32
+! CHECK: %[[VAL_2:.*]] = arith.select %[[VAL_2B]], %[[VAL_2A]], %[[c0_i32]] : i32
! CHECK: %[[VAL_3:.*]] = fir.address_of(@_QQcl.48656C6C6F20776F726C6421) : !fir.ref<!fir.char<1,12>>
! CHECK: %[[VAL_4:.*]] = arith.constant 12 : index
! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
x = "Hello world!"
end subroutine
+! CHECK-LABEL: func @_QMalloc_assignPtest_derived_scalar(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>>>{{.*}},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<!fir.type<_QMalloc_assignTt{i:i32}>>{{.*}}) {
+subroutine test_derived_scalar(x, s)
+ type(t), allocatable :: x
+ type(t) :: s
+ x = s
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>>>
+! CHECK: %[[VAL_3:.*]] = fir.box_addr %[[VAL_2]] : (!fir.box<!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>>) -> !fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>
+! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>) -> i64
+! CHECK: %[[VAL_5:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_6:.*]] = arith.cmpi ne, %[[VAL_4]], %[[VAL_5]] : i64
+! CHECK: %[[VAL_7:.*]]:2 = fir.if %[[VAL_6]] -> (i1, !fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>) {
+! CHECK: %[[VAL_8:.*]] = arith.constant false
+! CHECK: %[[VAL_9:.*]] = fir.if %[[VAL_8]] -> (!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>) {
+! CHECK: %[[VAL_10:.*]] = fir.allocmem !fir.type<_QMalloc_assignTt{i:i32}> {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_10]] : !fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_3]] : !fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_8]], %[[VAL_11:.*]] : i1, !fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>
+! CHECK: } else {
+! CHECK: %[[VAL_12:.*]] = arith.constant true
+! CHECK: %[[VAL_13:.*]] = fir.allocmem !fir.type<_QMalloc_assignTt{i:i32}> {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_12]], %[[VAL_13]] : i1, !fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>
+! CHECK: }
+! CHECK: %[[VAL_14:.*]] = fir.field_index i, !fir.type<_QMalloc_assignTt{i:i32}>
+! CHECK: %[[VAL_15:.*]] = fir.coordinate_of %[[VAL_1]], %[[VAL_14]] : (!fir.ref<!fir.type<_QMalloc_assignTt{i:i32}>>, !fir.field) -> !fir.ref<i32>
+! CHECK: %[[VAL_16:.*]] = fir.coordinate_of %[[VAL_7]]#1, %[[VAL_14]] : (!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>, !fir.field) -> !fir.ref<i32>
+! CHECK: %[[VAL_17:.*]] = fir.load %[[VAL_15]] : !fir.ref<i32>
+! CHECK: fir.store %[[VAL_17]] to %[[VAL_16]] : !fir.ref<i32
+! CHECK: fir.if %[[VAL_7]]#0 {
+! CHECK: fir.if %[[VAL_6]] {
+! CHECK: fir.freemem %[[VAL_3]]
+! CHECK: }
+! CHECK: %[[VAL_19:.*]] = fir.embox %[[VAL_7]]#1 : (!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>) -> !fir.box<!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>>
+! CHECK: fir.store %[[VAL_19]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.type<_QMalloc_assignTt{i:i32}>>>>
+! CHECK: }
+end subroutine
+
! -----------------------------------------------------------------------------
! Test numeric/logical array RHS
! -----------------------------------------------------------------------------
x = y
end subroutine
+! CHECK-LABEL: func @_QMalloc_assignPtest_runtime_shape(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>{{.*}}) {
+subroutine test_runtime_shape(x)
+ real, allocatable :: x(:, :)
+ interface
+ function return_pointer()
+ real, pointer :: return_pointer(:, :)
+ end function
+ end interface
+! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.array<?x?xf32>>> {bindc_name = ".result"}
+! CHECK: %[[VAL_2:.*]] = fir.call @_QPreturn_pointer() : () -> !fir.box<!fir.ptr<!fir.array<?x?xf32>>>
+! CHECK: fir.save_result %[[VAL_2]] to %[[VAL_1]] : !fir.box<!fir.ptr<!fir.array<?x?xf32>>>, !fir.ref<!fir.box<!fir.ptr<!fir.array<?x?xf32>>>>
+! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_1]] : !fir.ref<!fir.box<!fir.ptr<!fir.array<?x?xf32>>>>
+! CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_5:.*]]:3 = fir.box_dims %[[VAL_3]], %[[VAL_4]] : (!fir.box<!fir.ptr<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_7:.*]]:3 = fir.box_dims %[[VAL_3]], %[[VAL_6]] : (!fir.box<!fir.ptr<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_8:.*]] = fir.shift %[[VAL_5]]#0, %[[VAL_7]]#0 : (index, index) -> !fir.shift<2>
+! CHECK: %[[VAL_10:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_11:.*]]:3 = fir.box_dims %[[VAL_3]], %[[VAL_10]] : (!fir.box<!fir.ptr<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_12:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_13:.*]]:3 = fir.box_dims %[[VAL_3]], %[[VAL_12]] : (!fir.box<!fir.ptr<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_9:.*]] = fir.array_load %[[VAL_3]](%[[VAL_8]]) : (!fir.box<!fir.ptr<!fir.array<?x?xf32>>>, !fir.shift<2>) -> !fir.array<?x?xf32>
+! CHECK: %[[VAL_14:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+! CHECK: %[[VAL_15:.*]] = fir.box_addr %[[VAL_14]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_15]] : (!fir.heap<!fir.array<?x?xf32>>) -> i64
+! CHECK: %[[VAL_17:.*]] = arith.constant 0 : i64
+! CHECK: %[[VAL_18:.*]] = arith.cmpi ne, %[[VAL_16]], %[[VAL_17]] : i64
+! CHECK: %[[VAL_19:.*]]:2 = fir.if %[[VAL_18]] -> (i1, !fir.heap<!fir.array<?x?xf32>>) {
+! CHECK: %[[VAL_20:.*]] = arith.constant false
+! CHECK: %[[VAL_21:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_22:.*]]:3 = fir.box_dims %[[VAL_14]], %[[VAL_21]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_23:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_24:.*]]:3 = fir.box_dims %[[VAL_14]], %[[VAL_23]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_25:.*]] = arith.cmpi ne, %[[VAL_22]]#1, %[[VAL_11]]#1 : index
+! CHECK: %[[VAL_26:.*]] = arith.select %[[VAL_25]], %[[VAL_25]], %[[VAL_20]] : i1
+! CHECK: %[[VAL_27:.*]] = arith.cmpi ne, %[[VAL_24]]#1, %[[VAL_13]]#1 : index
+! CHECK: %[[VAL_28:.*]] = arith.select %[[VAL_27]], %[[VAL_27]], %[[VAL_26]] : i1
+! CHECK: %[[VAL_29:.*]] = fir.if %[[VAL_28]] -> (!fir.heap<!fir.array<?x?xf32>>) {
+! CHECK: %[[VAL_30:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[VAL_11]]#1, %[[VAL_13]]#1 {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_30]] : !fir.heap<!fir.array<?x?xf32>>
+! CHECK: } else {
+! CHECK: fir.result %[[VAL_15]] : !fir.heap<!fir.array<?x?xf32>>
+! CHECK: }
+! CHECK: fir.result %[[VAL_28]], %[[VAL_31:.*]] : i1, !fir.heap<!fir.array<?x?xf32>>
+! CHECK: } else {
+! CHECK: %[[VAL_32:.*]] = arith.constant true
+! CHECK: %[[VAL_33:.*]] = fir.allocmem !fir.array<?x?xf32>, %[[VAL_11]]#1, %[[VAL_13]]#1 {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_32]], %[[VAL_33]] : i1, !fir.heap<!fir.array<?x?xf32>>
+! CHECK: }
+
+! CHECK-NOT: fir.call @_QPreturn_pointer()
+! CHECK: %[[VAL_34:.*]] = fir.shape %[[VAL_11]]#1, %[[VAL_13]]#1 : (index, index) -> !fir.shape<2>
+! CHECK: %[[VAL_35:.*]] = fir.array_load %[[VAL_19]]#1(%[[VAL_34]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.array<?x?xf32>
+! normal array assignment ....
+! CHECK-NOT: fir.call @_QPreturn_pointer()
+! CHECK: fir.array_merge_store %{{.*}}, %{{.*}} to %[[VAL_19]]#1 : !fir.array<?x?xf32>, !fir.array<?x?xf32>, !fir.heap<!fir.array<?x?xf32>>
+! CHECK-NOT: fir.call @_QPreturn_pointer()
+
+! CHECK: fir.if %[[VAL_19]]#0 {
+! CHECK: fir.if %[[VAL_18]] {
+! CHECK: fir.freemem %[[VAL_15]]
+! CHECK: }
+! CHECK: %[[VAL_56:.*]] = fir.shape %[[VAL_11]]#1, %[[VAL_13]]#1 : (index, index) -> !fir.shape<2>
+! CHECK: %[[VAL_57:.*]] = fir.embox %[[VAL_19]]#1(%[[VAL_56]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.heap<!fir.array<?x?xf32>>>
+! CHECK: fir.store %[[VAL_57]] to %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+! CHECK: }
+ x = return_pointer()
+end subroutine
+
! CHECK-LABEL: func @_QMalloc_assignPtest_scalar_rhs(
subroutine test_scalar_rhs(x, y)
real, allocatable :: x(:)
! Test character array RHS
! -----------------------------------------------------------------------------
+
+! Hit TODO: gathering lhs length in array expression
+!subroutine test_deferred_char_rhs_scalar(x)
+! character(:), allocatable :: x(:)
+! x = "Hello world!"
+!end subroutine
+
! CHECK: func @_QMalloc_assignPtest_cst_char_rhs_scalar(
subroutine test_cst_char_rhs_scalar(x)
character(10), allocatable :: x(:)
! CHECK: fir.if %false -> {{.*}} {
! CHECK: }
! CHECK: } else {
- ! CHECK: fir.call @_FortranAReportFatalUserError
+ ! TODO: runtime error if unallocated
! CHECK-NOT: allocmem
! CHECK: }
end subroutine
! CHECK: fir.if %false -> {{.*}} {
! CHECK: }
! CHECK: } else {
- ! CHECK: fir.call @_FortranAReportFatalUserError
+ ! TODO: runtime error if unallocated
! CHECK-NOT: allocmem
! CHECK: }
end subroutine
+! Hit TODO: gathering lhs length in array expression
+!subroutine test_deferred_char(x, c)
+! character(:), allocatable :: x(:)
+! character(12) :: c(20)
+! x = "Hello world!"
+!end subroutine
+
! CHECK-LABEL: func @_QMalloc_assignPtest_cst_char(
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>>{{.*}},
! CHECK-SAME: %[[VAL_1:.*]]: !fir.boxchar<1>{{.*}}) {
! CHECK: %[[VAL_3:.*]]:2 = fir.unboxchar %[[VAL_2]] : (!fir.boxchar<1>) -> (!fir.ref<!fir.char<1,?>>, index)
! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]]#0 : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<!fir.array<20x!fir.char<1,?>>>
! CHECK: %[[VAL_5_0:.*]] = arith.constant 20 : index
-! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[VAL_6A:.*]] = fir.load %[[VAL_1]] : !fir.ref<i32>
+! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_6B:.*]] = arith.cmpi sgt, %[[VAL_6A]], %[[c0_i32]] : i32
+! CHECK: %[[VAL_6:.*]] = arith.select %[[VAL_6B]], %[[VAL_6A]], %[[c0_i32]] : i32
! CHECK: %[[VAL_5:.*]] = arith.constant 20 : index
! CHECK: %[[VAL_7:.*]] = fir.shape %[[VAL_5_0]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_9:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
x = c
end subroutine
+! CHECK-LABEL: func @_QMalloc_assignPtest_derived_with_init
+subroutine test_derived_with_init(x, y)
+ type t
+ integer, allocatable :: a(:)
+ end type
+ type(t), allocatable :: x
+ type(t) :: y
+  ! The allocatable component of `x` needs to be initialized
+ ! during the automatic allocation (setting its rank and allocation
+ ! status) before it is assigned with the component of `y`
+ x = y
+! CHECK: fir.if %{{.*}} {
+! CHECK: %[[VAL_11:.*]] = fir.allocmem !fir.type<_QMalloc_assignFtest_derived_with_initTt{a:!fir.box<!fir.heap<!fir.array<?xi32>>>}> {uniq_name = ".auto.alloc"}
+! CHECK: %[[VAL_12:.*]] = fir.embox %[[VAL_11]] : (!fir.heap<!fir.type<_QMalloc_assignFtest_derived_with_initTt{a:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>) -> !fir.box<!fir.heap<!fir.type<_QMalloc_assignFtest_derived_with_initTt{a:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_12]] : (!fir.box<!fir.heap<!fir.type<_QMalloc_assignFtest_derived_with_initTt{a:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>>) -> !fir.box<none>
+! CHECK: fir.call @_FortranAInitialize(%[[VAL_15]], %{{.*}}, %{{.*}}) : (!fir.box<none>, !fir.ref<i8>, i32) -> none
+! CHECK: fir.result %[[VAL_11]] : !fir.heap<!fir.type<_QMalloc_assignFtest_derived_with_initTt{a:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>
+! CHECK: } else {
+! CHECK: fir.result %{{.*}} : !fir.heap<!fir.type<_QMalloc_assignFtest_derived_with_initTt{a:!fir.box<!fir.heap<!fir.array<?xi32>>>}>>
+! CHECK: }
+end subroutine
+
+! CHECK-LABEL: func @_QMalloc_assignPtest_vector_subscript(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {fir.bindc_name = "x"},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "y"},
+! CHECK-SAME: %[[VAL_2:.*]]: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "v"}) {
+subroutine test_vector_subscript(x, y, v)
+  ! Test that the new shape is computed correctly in the presence of
+ ! vector subscripts on the RHS and that it is used to allocate
+ ! the new storage and to drive the implicit loop.
+ integer, allocatable :: x(:)
+ integer :: y(:), v(:)
+ x = y(v)
+! CHECK: %[[VAL_3:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_4:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_5:.*]]:3 = fir.box_dims %[[VAL_1]], %[[VAL_4]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
+! CHECK: %[[VAL_6:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_7:.*]]:3 = fir.box_dims %[[VAL_2]], %[[VAL_6]] : (!fir.box<!fir.array<?xi32>>, index) -> (index, index, index)
+! CHECK: %[[VAL_8:.*]] = fir.array_load %[[VAL_2]] : (!fir.box<!fir.array<?xi32>>) -> !fir.array<?xi32>
+! CHECK: %[[VAL_9:.*]] = arith.cmpi sgt, %[[VAL_7]]#1, %[[VAL_5]]#1 : index
+! CHECK: %[[VAL_10:.*]] = arith.select %[[VAL_9]], %[[VAL_5]]#1, %[[VAL_7]]#1 : index
+! CHECK: fir.if {{.*}} {
+! CHECK: %[[VAL_18:.*]] = arith.constant false
+! CHECK: %[[VAL_20:.*]]:3 = fir.box_dims %{{.*}}, %{{.*}} : (!fir.box<!fir.heap<!fir.array<?xi32>>>, index) -> (index, index, index)
+! CHECK: %[[VAL_21:.*]] = arith.cmpi ne, %[[VAL_20]]#1, %[[VAL_10]] : index
+! CHECK: %[[VAL_22:.*]] = arith.select %[[VAL_21]], %[[VAL_21]], %[[VAL_18]] : i1
+! CHECK: fir.if %[[VAL_22]] {{.*}} {
+! CHECK: %[[VAL_24:.*]] = fir.allocmem !fir.array<?xi32>, %[[VAL_10]] {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %[[VAL_24]] : !fir.heap<!fir.array<?xi32>>
+! CHECK: } else {
+! CHECK: fir.result %{{.*}} : !fir.heap<!fir.array<?xi32>>
+! CHECK: }
+! CHECK: fir.result %{{.*}}, %{{.*}}
+! CHECK: } else {
+! CHECK: %[[VAL_27:.*]] = fir.allocmem !fir.array<?xi32>, %[[VAL_10]] {uniq_name = ".auto.alloc"}
+! CHECK: fir.result %{{.*}}, %[[VAL_27]] : i1, !fir.heap<!fir.array<?xi32>>
+! CHECK: }
+! CHECK: %[[VAL_28:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1>
+! CHECK: %[[VAL_29:.*]] = fir.array_load %[[VAL_30:.*]]#1(%[[VAL_28]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.array<?xi32>
+! CHECK: %[[VAL_31:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_32:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_33:.*]] = arith.subi %[[VAL_10]], %[[VAL_31]] : index
+! CHECK: %[[VAL_34:.*]] = fir.do_loop %[[VAL_35:.*]] = %[[VAL_32]] to %[[VAL_33]] step %[[VAL_31]] {{.*}} {
+! CHECK: }
+end subroutine
+
+! CHECK: fir.global linkonce @[[error_message]] constant : !fir.char<1,76> {
+! CHECK: %[[msg:.*]] = fir.string_lit "array left hand side must be allocated when the right hand side is a scalar\00"(76) : !fir.char<1,76>
+! CHECK: fir.has_value %[[msg:.*]] : !fir.char<1,76>
+! CHECK: }
+
end module
+
+! use alloc_assign
+! real :: y(2, 3) = reshape([1,2,3,4,5,6], [2,3])
+! real, allocatable :: x (:, :)
+! allocate(x(2,2))
+! call test_with_lbounds(x, y)
+! print *, x(10, 20)
+! print *, x
+!end
character(n), allocatable :: c
external foo1
! Check that the length expr was evaluated before the execution parts.
- ! CHECK: %[[len:.*]] = fir.load %arg1 : !fir.ref<i32>
+ ! CHECK: %[[raw_len:.*]] = fir.load %arg1 : !fir.ref<i32>
+ ! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+ ! CHECK: %[[cmp:.*]] = arith.cmpi sgt, %[[raw_len]], %[[c0_i32]] : i32
+ ! CHECK: %[[len:.*]] = arith.select %[[cmp]], %[[raw_len]], %[[c0_i32]] : i32
n = n + 1
! CHECK: fir.store {{.*}} to %arg1 : !fir.ref<i32>
call foo1(c)
character(n), allocatable :: c(:)
external foo1
! Check that the length expr was evaluated before the execution parts.
- ! CHECK: %[[len:.*]] = fir.load %arg1 : !fir.ref<i32>
+ ! CHECK: %[[raw_len:.*]] = fir.load %arg1 : !fir.ref<i32>
+ ! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+ ! CHECK: %[[cmp:.*]] = arith.cmpi sgt, %[[raw_len]], %[[c0_i32]] : i32
+ ! CHECK: %[[len:.*]] = arith.select %[[cmp]], %[[raw_len]], %[[c0_i32]] : i32
n = n + 1
! CHECK: fir.store {{.*}} to %arg1 : !fir.ref<i32>
call foo1(c(1))
! Test lowering of allocatables using runtime for allocate/deallcoate statements.
! CHECK-LABEL: _QPfoo
subroutine foo()
- real, allocatable :: x(:), y(:, :), z
- ! CHECK: %[[xBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {{{.*}}uniq_name = "_QFfooEx"}
- ! CHECK-DAG: %[[xNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?xf32>>
- ! CHECK-DAG: %[[xNullShape:.*]] = fir.shape %c0{{.*}} : (index) -> !fir.shape<1>
- ! CHECK: %[[xInitEmbox:.*]] = fir.embox %[[xNullAddr]](%[[xNullShape]]) : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xf32>>>
- ! CHECK: fir.store %[[xInitEmbox]] to %[[xBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
-
- ! CHECK: %[[yBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x?xf32>>> {{{.*}}uniq_name = "_QFfooEy"}
- ! CHECK-DAG: %[[yNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?x?xf32>>
- ! CHECK-DAG: %[[yNullShape:.*]] = fir.shape %c0{{.*}}, %c0{{.*}} : (index, index) -> !fir.shape<2>
- ! CHECK: %[[yInitEmbox:.*]] = fir.embox %[[yNullAddr]](%[[yNullShape]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.heap<!fir.array<?x?xf32>>>
- ! CHECK: fir.store %[[yInitEmbox]] to %[[yBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
-
- ! CHECK: %[[zBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<f32>> {{{.*}}uniq_name = "_QFfooEz"}
- ! CHECK: %[[zNullAddr:.*]] = fir.zero_bits !fir.heap<f32>
- ! CHECK: %[[zInitEmbox:.*]] = fir.embox %[[zNullAddr]] : (!fir.heap<f32>) -> !fir.box<!fir.heap<f32>>
- ! CHECK: fir.store %[[zInitEmbox]] to %[[zBoxAddr]] : !fir.ref<!fir.box<!fir.heap<f32>>>
-
- allocate(x(42:100), y(43:50, 51), z)
- ! CHECK-DAG: %[[errMsg:.*]] = fir.absent !fir.box<none>
- ! CHECK-DAG: %[[xlb:.*]] = arith.constant 42 : i32
- ! CHECK-DAG: %[[xub:.*]] = arith.constant 100 : i32
- ! CHECK-DAG: %[[xBoxCast2:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK-DAG: %[[xlbCast:.*]] = fir.convert %[[xlb]] : (i32) -> i64
- ! CHECK-DAG: %[[xubCast:.*]] = fir.convert %[[xub]] : (i32) -> i64
- ! CHECK: fir.call @{{.*}}AllocatableSetBounds(%[[xBoxCast2]], %c0{{.*}}, %[[xlbCast]], %[[xubCast]]) : (!fir.ref<!fir.box<none>>, i32, i64, i64) -> none
- ! CHECK-DAG: %[[xBoxCast3:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK-DAG: %[[sourceFile:.*]] = fir.convert %{{.*}} -> !fir.ref<i8>
- ! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[xBoxCast3]], %false{{.*}}, %[[errMsg]], %[[sourceFile]], %{{.*}}) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
-
- ! Simply check that we are emitting the right numebr of set bound for y and z. Otherwise, this is just like x.
- ! CHECK: fir.convert %[[yBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableSetBounds
- ! CHECK: fir.call @{{.*}}AllocatableSetBounds
- ! CHECK: fir.call @{{.*}}AllocatableAllocate
- ! CHECK: %[[zBoxCast:.*]] = fir.convert %[[zBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<f32>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK-NOT: fir.call @{{.*}}AllocatableSetBounds
- ! CHECK: fir.call @{{.*}}AllocatableAllocate
-
- ! Check that y descriptor is read when referencing it.
- ! CHECK: %[[yBoxLoad:.*]] = fir.load %[[yBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
- ! CHECK: %[[yBounds1:.*]]:3 = fir.box_dims %[[yBoxLoad]], %c0{{.*}} : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
- ! CHECK: %[[yBounds2:.*]]:3 = fir.box_dims %[[yBoxLoad]], %c1{{.*}} : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
- ! CHECK: %[[yAddr:.*]] = fir.box_addr %[[yBoxLoad]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
- print *, x, y(45, 46), z
-
- deallocate(x, y, z)
- ! CHECK: %[[xBoxCast4:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[xBoxCast4]], {{.*}})
- ! CHECK: %[[yBoxCast4:.*]] = fir.convert %[[yBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[yBoxCast4]], {{.*}})
- ! CHECK: %[[zBoxCast4:.*]] = fir.convert %[[zBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<f32>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[zBoxCast4]], {{.*}})
- end subroutine
-
- ! test lowering of character allocatables
- ! CHECK-LABEL: _QPchar_deferred(
- subroutine char_deferred(n)
- integer :: n
- character(:), allocatable :: scalar, array(:)
- ! CHECK-DAG: %[[sBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.char<1,?>>> {{{.*}}uniq_name = "_QFchar_deferredEscalar"}
- ! CHECK-DAG: %[[sNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.char<1,?>>
- ! CHECK-DAG: %[[sInitBox:.*]] = fir.embox %[[sNullAddr]] typeparams %c0{{.*}} : (!fir.heap<!fir.char<1,?>>, index) -> !fir.box<!fir.heap<!fir.char<1,?>>>
- ! CHECK-DAG: fir.store %[[sInitBox]] to %[[sBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
-
- ! CHECK-DAG: %[[aBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>> {{{.*}}uniq_name = "_QFchar_deferredEarray"}
- ! CHECK-DAG: %[[aNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?x!fir.char<1,?>>>
- ! CHECK-DAG: %[[aNullShape:.*]] = fir.shape %c0{{.*}} : (index) -> !fir.shape<1>
- ! CHECK-DAG: %[[aInitBox:.*]] = fir.embox %[[aNullAddr]](%[[aNullShape]]) typeparams %c0{{.*}} : (!fir.heap<!fir.array<?x!fir.char<1,?>>>, !fir.shape<1>, index) -> !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>
- ! CHECK-DAG: fir.store %[[aInitBox]] to %[[aBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
-
- allocate(character(10):: scalar, array(30))
- ! CHECK-DAG: %[[sBoxCast1:.*]] = fir.convert %[[sBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK-DAG: %[[ten1:.*]] = fir.convert %c10{{.*}} : (i32) -> i64
- ! CHECK: fir.call @{{.*}}AllocatableInitCharacter(%[[sBoxCast1]], %[[ten1]], %c1{{.*}}, %c0{{.*}}, %c0{{.*}})
- ! CHECK-NOT: AllocatableSetBounds
- ! CHECK: %[[sBoxCast2:.*]] = fir.convert %[[sBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[sBoxCast2]]
-
- ! CHECK-DAG: %[[aBoxCast1:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK-DAG: %[[ten2:.*]] = fir.convert %c10{{.*}} : (i32) -> i64
- ! CHECK: fir.call @{{.*}}AllocatableInitCharacter(%[[aBoxCast1]], %[[ten2]], %c1{{.*}}, %c1{{.*}}, %c0{{.*}})
- ! CHECK: %[[aBoxCast2:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableSetBounds(%[[aBoxCast2]]
- ! CHECK: %[[aBoxCast3:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[aBoxCast3]]
-
- deallocate(scalar, array)
- ! CHECK: %[[sBoxCast3:.*]] = fir.convert %[[sBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[sBoxCast3]]
- ! CHECK: %[[aBoxCast4:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[aBoxCast4]]
-
- ! only testing that the correct length is set in the descriptor.
- allocate(character(n):: scalar, array(40))
- ! CHECK: %[[n:.*]] = fir.load %arg0 : !fir.ref<i32>
- ! CHECK-DAG: %[[ncast1:.*]] = fir.convert %[[n]] : (i32) -> i64
- ! CHECK-DAG: %[[sBoxCast4:.*]] = fir.convert %[[sBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableInitCharacter(%[[sBoxCast4]], %[[ncast1]], %c1{{.*}}, %c0{{.*}}, %c0{{.*}})
- ! CHECK-DAG: %[[ncast2:.*]] = fir.convert %[[n]] : (i32) -> i64
- ! CHECK-DAG: %[[aBoxCast5:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
- ! CHECK: fir.call @{{.*}}AllocatableInitCharacter(%[[aBoxCast5]], %[[ncast2]], %c1{{.*}}, %c1{{.*}}, %c0{{.*}})
- end subroutine
-
- ! CHECK-LABEL: _QPchar_explicit_cst(
- subroutine char_explicit_cst(n)
- integer :: n
- character(10), allocatable :: scalar, array(:)
- ! CHECK-DAG: %[[sBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.char<1,10>>> {{{.*}}uniq_name = "_QFchar_explicit_cstEscalar"}
- ! CHECK-DAG: %[[sNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.char<1,10>>
- ! CHECK-DAG: %[[sInitBox:.*]] = fir.embox %[[sNullAddr]] : (!fir.heap<!fir.char<1,10>>) -> !fir.box<!fir.heap<!fir.char<1,10>>>
- ! CHECK-DAG: fir.store %[[sInitBox]] to %[[sBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,10>>>>
-
- ! CHECK-DAG: %[[aBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>> {{{.*}}uniq_name = "_QFchar_explicit_cstEarray"}
- ! CHECK-DAG: %[[aNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?x!fir.char<1,10>>>
- ! CHECK-DAG: %[[aNullShape:.*]] = fir.shape %c0{{.*}} : (index) -> !fir.shape<1>
- ! CHECK-DAG: %[[aInitBox:.*]] = fir.embox %[[aNullAddr]](%[[aNullShape]]) : (!fir.heap<!fir.array<?x!fir.char<1,10>>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>
- ! CHECK-DAG: fir.store %[[aInitBox]] to %[[aBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>>
- allocate(scalar, array(20))
- ! CHECK-NOT: AllocatableInitCharacter
- ! CHECK: AllocatableAllocate
- ! CHECK-NOT: AllocatableInitCharacter
- ! CHECK: AllocatableAllocate
- deallocate(scalar, array)
- ! CHECK: AllocatableDeallocate
- ! CHECK: AllocatableDeallocate
- end subroutine
-
- ! CHECK-LABEL: _QPchar_explicit_dyn(
- subroutine char_explicit_dyn(n, l1, l2)
- integer :: n, l1, l2
- character(l1), allocatable :: scalar
- ! CHECK-DAG: %[[l1:.*]] = fir.load %arg1 : !fir.ref<i32>
- ! CHECK-DAG: %[[sBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.char<1,?>>> {{{.*}}uniq_name = "_QFchar_explicit_dynEscalar"}
- ! CHECK-DAG: %[[sNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.char<1,?>>
- ! CHECK-DAG: %[[sInitBox:.*]] = fir.embox %[[sNullAddr]] typeparams %[[l1]] : (!fir.heap<!fir.char<1,?>>, i32) -> !fir.box<!fir.heap<!fir.char<1,?>>>
- ! CHECK-DAG: fir.store %[[sInitBox]] to %[[sBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
-
- character(l2), allocatable :: array(:)
- ! CHECK-DAG: %[[l2:.*]] = fir.load %arg2 : !fir.ref<i32>
- ! CHECK-DAG: %[[aBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>> {{{.*}}uniq_name = "_QFchar_explicit_dynEarray"}
- ! CHECK-DAG: %[[aNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?x!fir.char<1,?>>>
- ! CHECK-DAG: %[[aNullShape:.*]] = fir.shape %c0{{.*}} : (index) -> !fir.shape<1>
- ! CHECK-DAG: %[[aInitBox:.*]] = fir.embox %[[aNullAddr]](%[[aNullShape]]) typeparams %[[l2]] : (!fir.heap<!fir.array<?x!fir.char<1,?>>>, !fir.shape<1>, i32) -> !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>
- ! CHECK-DAG: fir.store %[[aInitBox]] to %[[aBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
- allocate(scalar, array(20))
- ! CHECK-NOT: AllocatableInitCharacter
- ! CHECK: AllocatableAllocate
- ! CHECK-NOT: AllocatableInitCharacter
- ! CHECK: AllocatableAllocate
- deallocate(scalar, array)
- ! CHECK: AllocatableDeallocate
- ! CHECK: AllocatableDeallocate
- end subroutine
+ real, allocatable :: x(:), y(:, :), z
+ ! CHECK: %[[xBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {{{.*}}uniq_name = "_QFfooEx"}
+ ! CHECK-DAG: %[[xNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?xf32>>
+ ! CHECK-DAG: %[[xNullShape:.*]] = fir.shape %c0{{.*}} : (index) -> !fir.shape<1>
+ ! CHECK: %[[xInitEmbox:.*]] = fir.embox %[[xNullAddr]](%[[xNullShape]]) : (!fir.heap<!fir.array<?xf32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xf32>>>
+ ! CHECK: fir.store %[[xInitEmbox]] to %[[xBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+
+ ! CHECK: %[[yBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x?xf32>>> {{{.*}}uniq_name = "_QFfooEy"}
+ ! CHECK-DAG: %[[yNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?x?xf32>>
+ ! CHECK-DAG: %[[yNullShape:.*]] = fir.shape %c0{{.*}}, %c0{{.*}} : (index, index) -> !fir.shape<2>
+ ! CHECK: %[[yInitEmbox:.*]] = fir.embox %[[yNullAddr]](%[[yNullShape]]) : (!fir.heap<!fir.array<?x?xf32>>, !fir.shape<2>) -> !fir.box<!fir.heap<!fir.array<?x?xf32>>>
+ ! CHECK: fir.store %[[yInitEmbox]] to %[[yBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+
+ ! CHECK: %[[zBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<f32>> {{{.*}}uniq_name = "_QFfooEz"}
+ ! CHECK: %[[zNullAddr:.*]] = fir.zero_bits !fir.heap<f32>
+ ! CHECK: %[[zInitEmbox:.*]] = fir.embox %[[zNullAddr]] : (!fir.heap<f32>) -> !fir.box<!fir.heap<f32>>
+ ! CHECK: fir.store %[[zInitEmbox]] to %[[zBoxAddr]] : !fir.ref<!fir.box<!fir.heap<f32>>>
+
+ allocate(x(42:100), y(43:50, 51), z)
+ ! CHECK-DAG: %[[errMsg:.*]] = fir.absent !fir.box<none>
+ ! CHECK-DAG: %[[xlb:.*]] = arith.constant 42 : i32
+ ! CHECK-DAG: %[[xub:.*]] = arith.constant 100 : i32
+ ! CHECK-DAG: %[[xBoxCast2:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK-DAG: %[[xlbCast:.*]] = fir.convert %[[xlb]] : (i32) -> i64
+ ! CHECK-DAG: %[[xubCast:.*]] = fir.convert %[[xub]] : (i32) -> i64
+ ! CHECK: fir.call @{{.*}}AllocatableSetBounds(%[[xBoxCast2]], %c0{{.*}}, %[[xlbCast]], %[[xubCast]]) : (!fir.ref<!fir.box<none>>, i32, i64, i64) -> none
+ ! CHECK-DAG: %[[xBoxCast3:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK-DAG: %[[sourceFile:.*]] = fir.convert %{{.*}} -> !fir.ref<i8>
+ ! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[xBoxCast3]], %false{{.*}}, %[[errMsg]], %[[sourceFile]], %{{.*}}) : (!fir.ref<!fir.box<none>>, i1, !fir.box<none>, !fir.ref<i8>, i32) -> i32
+
+  ! Simply check that we are emitting the right number of set bounds for y and z. Otherwise, this is just like x.
+ ! CHECK: fir.convert %[[yBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableSetBounds
+ ! CHECK: fir.call @{{.*}}AllocatableSetBounds
+ ! CHECK: fir.call @{{.*}}AllocatableAllocate
+ ! CHECK: %[[zBoxCast:.*]] = fir.convert %[[zBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<f32>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK-NOT: fir.call @{{.*}}AllocatableSetBounds
+ ! CHECK: fir.call @{{.*}}AllocatableAllocate
+
+ ! Check that y descriptor is read when referencing it.
+ ! CHECK: %[[yBoxLoad:.*]] = fir.load %[[yBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>
+ ! CHECK: %[[yBounds1:.*]]:3 = fir.box_dims %[[yBoxLoad]], %c0{{.*}} : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+ ! CHECK: %[[yBounds2:.*]]:3 = fir.box_dims %[[yBoxLoad]], %c1{{.*}} : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>, index) -> (index, index, index)
+ ! CHECK: %[[yAddr:.*]] = fir.box_addr %[[yBoxLoad]] : (!fir.box<!fir.heap<!fir.array<?x?xf32>>>) -> !fir.heap<!fir.array<?x?xf32>>
+ print *, x, y(45, 46), z
+
+ deallocate(x, y, z)
+ ! CHECK: %[[xBoxCast4:.*]] = fir.convert %[[xBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[xBoxCast4]], {{.*}})
+ ! CHECK: %[[yBoxCast4:.*]] = fir.convert %[[yBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x?xf32>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[yBoxCast4]], {{.*}})
+ ! CHECK: %[[zBoxCast4:.*]] = fir.convert %[[zBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<f32>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[zBoxCast4]], {{.*}})
+end subroutine
+
+! test lowering of character allocatables
+! CHECK-LABEL: _QPchar_deferred(
+subroutine char_deferred(n)
+ integer :: n
+ character(:), allocatable :: scalar, array(:)
+ ! CHECK-DAG: %[[sBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.char<1,?>>> {{{.*}}uniq_name = "_QFchar_deferredEscalar"}
+ ! CHECK-DAG: %[[sNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.char<1,?>>
+ ! CHECK-DAG: %[[sInitBox:.*]] = fir.embox %[[sNullAddr]] typeparams %c0{{.*}} : (!fir.heap<!fir.char<1,?>>, index) -> !fir.box<!fir.heap<!fir.char<1,?>>>
+ ! CHECK-DAG: fir.store %[[sInitBox]] to %[[sBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
+
+ ! CHECK-DAG: %[[aBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>> {{{.*}}uniq_name = "_QFchar_deferredEarray"}
+ ! CHECK-DAG: %[[aNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?x!fir.char<1,?>>>
+ ! CHECK-DAG: %[[aNullShape:.*]] = fir.shape %c0{{.*}} : (index) -> !fir.shape<1>
+ ! CHECK-DAG: %[[aInitBox:.*]] = fir.embox %[[aNullAddr]](%[[aNullShape]]) typeparams %c0{{.*}} : (!fir.heap<!fir.array<?x!fir.char<1,?>>>, !fir.shape<1>, index) -> !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>
+ ! CHECK-DAG: fir.store %[[aInitBox]] to %[[aBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
+
+ allocate(character(10):: scalar, array(30))
+ ! CHECK-DAG: %[[sBoxCast1:.*]] = fir.convert %[[sBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK-DAG: %[[ten1:.*]] = fir.convert %c10{{.*}} : (i32) -> i64
+ ! CHECK: fir.call @{{.*}}AllocatableInitCharacter(%[[sBoxCast1]], %[[ten1]], %c1{{.*}}, %c0{{.*}}, %c0{{.*}})
+ ! CHECK-NOT: AllocatableSetBounds
+ ! CHECK: %[[sBoxCast2:.*]] = fir.convert %[[sBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[sBoxCast2]]
+
+ ! CHECK-DAG: %[[aBoxCast1:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK-DAG: %[[ten2:.*]] = fir.convert %c10{{.*}} : (i32) -> i64
+ ! CHECK: fir.call @{{.*}}AllocatableInitCharacter(%[[aBoxCast1]], %[[ten2]], %c1{{.*}}, %c1{{.*}}, %c0{{.*}})
+ ! CHECK: %[[aBoxCast2:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableSetBounds(%[[aBoxCast2]]
+ ! CHECK: %[[aBoxCast3:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableAllocate(%[[aBoxCast3]]
+
+ deallocate(scalar, array)
+ ! CHECK: %[[sBoxCast3:.*]] = fir.convert %[[sBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[sBoxCast3]]
+ ! CHECK: %[[aBoxCast4:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableDeallocate(%[[aBoxCast4]]
+
+ ! only testing that the correct length is set in the descriptor.
+ allocate(character(n):: scalar, array(40))
+ ! CHECK: %[[n:.*]] = fir.load %arg0 : !fir.ref<i32>
+ ! CHECK-DAG: %[[ncast1:.*]] = fir.convert %[[n]] : (i32) -> i64
+ ! CHECK-DAG: %[[sBoxCast4:.*]] = fir.convert %[[sBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableInitCharacter(%[[sBoxCast4]], %[[ncast1]], %c1{{.*}}, %c0{{.*}}, %c0{{.*}})
+ ! CHECK-DAG: %[[ncast2:.*]] = fir.convert %[[n]] : (i32) -> i64
+ ! CHECK-DAG: %[[aBoxCast5:.*]] = fir.convert %[[aBoxAddr]] : (!fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>) -> !fir.ref<!fir.box<none>>
+ ! CHECK: fir.call @{{.*}}AllocatableInitCharacter(%[[aBoxCast5]], %[[ncast2]], %c1{{.*}}, %c1{{.*}}, %c0{{.*}})
+end subroutine
+
+! CHECK-LABEL: _QPchar_explicit_cst(
+subroutine char_explicit_cst(n)
+ integer :: n
+ character(10), allocatable :: scalar, array(:)
+ ! CHECK-DAG: %[[sBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.char<1,10>>> {{{.*}}uniq_name = "_QFchar_explicit_cstEscalar"}
+ ! CHECK-DAG: %[[sNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.char<1,10>>
+ ! CHECK-DAG: %[[sInitBox:.*]] = fir.embox %[[sNullAddr]] : (!fir.heap<!fir.char<1,10>>) -> !fir.box<!fir.heap<!fir.char<1,10>>>
+ ! CHECK-DAG: fir.store %[[sInitBox]] to %[[sBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,10>>>>
+
+ ! CHECK-DAG: %[[aBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>> {{{.*}}uniq_name = "_QFchar_explicit_cstEarray"}
+ ! CHECK-DAG: %[[aNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?x!fir.char<1,10>>>
+ ! CHECK-DAG: %[[aNullShape:.*]] = fir.shape %c0{{.*}} : (index) -> !fir.shape<1>
+ ! CHECK-DAG: %[[aInitBox:.*]] = fir.embox %[[aNullAddr]](%[[aNullShape]]) : (!fir.heap<!fir.array<?x!fir.char<1,10>>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>
+ ! CHECK-DAG: fir.store %[[aInitBox]] to %[[aBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,10>>>>>
+ allocate(scalar, array(20))
+ ! CHECK-NOT: AllocatableInitCharacter
+ ! CHECK: AllocatableAllocate
+ ! CHECK-NOT: AllocatableInitCharacter
+ ! CHECK: AllocatableAllocate
+ deallocate(scalar, array)
+ ! CHECK: AllocatableDeallocate
+ ! CHECK: AllocatableDeallocate
+end subroutine
+
+! CHECK-LABEL: _QPchar_explicit_dyn(
+subroutine char_explicit_dyn(n, l1, l2)
+ integer :: n, l1, l2
+ character(l1), allocatable :: scalar
+ ! CHECK: %[[sBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.char<1,?>>> {{{.*}}uniq_name = "_QFchar_explicit_dynEscalar"}
+ ! CHECK: %[[raw_l1:.*]] = fir.load %arg1 : !fir.ref<i32>
+ ! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+ ! CHECK: %[[cmp1:.*]] = arith.cmpi sgt, %[[raw_l1]], %[[c0_i32]] : i32
+ ! CHECK: %[[l1:.*]] = arith.select %[[cmp1]], %[[raw_l1]], %[[c0_i32]] : i32
+ ! CHECK: %[[sNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.char<1,?>>
+ ! CHECK: %[[sInitBox:.*]] = fir.embox %[[sNullAddr]] typeparams %[[l1]] : (!fir.heap<!fir.char<1,?>>, i32) -> !fir.box<!fir.heap<!fir.char<1,?>>>
+ ! CHECK: fir.store %[[sInitBox]] to %[[sBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.char<1,?>>>>
+
+ character(l2), allocatable :: zarray(:)
+ ! CHECK: %[[aBoxAddr:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>> {{{.*}}uniq_name = "_QFchar_explicit_dynEzarray"}
+ ! CHECK: %[[raw_l2:.*]] = fir.load %arg2 : !fir.ref<i32>
+ ! CHECK: %[[c0_i32_2:.*]] = arith.constant 0 : i32
+ ! CHECK: %[[cmp2:.*]] = arith.cmpi sgt, %[[raw_l2]], %[[c0_i32_2]] : i32
+ ! CHECK: %[[l2:.*]] = arith.select %[[cmp2]], %[[raw_l2]], %[[c0_i32_2]] : i32
+ ! CHECK: %[[aNullAddr:.*]] = fir.zero_bits !fir.heap<!fir.array<?x!fir.char<1,?>>>
+ ! CHECK: %[[aNullShape:.*]] = fir.shape %c0{{.*}} : (index) -> !fir.shape<1>
+ ! CHECK: %[[aInitBox:.*]] = fir.embox %[[aNullAddr]](%[[aNullShape]]) typeparams %[[l2]] : (!fir.heap<!fir.array<?x!fir.char<1,?>>>, !fir.shape<1>, i32) -> !fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>
+ ! CHECK: fir.store %[[aInitBox]] to %[[aBoxAddr]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?x!fir.char<1,?>>>>>
+ allocate(scalar, zarray(20))
+ ! CHECK-NOT: AllocatableInitCharacter
+ ! CHECK: AllocatableAllocate
+ ! CHECK-NOT: AllocatableInitCharacter
+ ! CHECK: AllocatableAllocate
+ deallocate(scalar, zarray)
+ ! CHECK: AllocatableDeallocate
+ ! CHECK: AllocatableDeallocate
+end subroutine
subroutine char_explicit_dyn(l1, l2)
integer :: l1, l2
character(l1), allocatable :: c
- ! CHECK-DAG: %[[cLen:.*]] = fir.load %arg0 : !fir.ref<i32>
- ! CHECK-DAG: %[[cAddrVar:.*]] = fir.alloca !fir.heap<!fir.char<1,?>> {{{.*}}uniq_name = "_QFchar_explicit_dynEc.addr"}
+ ! CHECK: %[[l1:.*]] = fir.load %arg0 : !fir.ref<i32>
+ ! CHECK: %[[c0_i32:.*]] = arith.constant 0 : i32
+ ! CHECK: %[[cmp:.*]] = arith.cmpi sgt, %[[l1]], %[[c0_i32]] : i32
+ ! CHECK: %[[cLen:.*]] = arith.select %[[cmp]], %[[l1]], %[[c0_i32]] : i32
+ ! CHECK: %[[cAddrVar:.*]] = fir.alloca !fir.heap<!fir.char<1,?>> {{{.*}}uniq_name = "_QFchar_explicit_dynEc.addr"}
! CHECK-NOT: "_QFchar_explicit_dynEc.len"
allocate(c)
! CHECK: %[[cLenCast1:.*]] = fir.convert %[[cLen]] : (i32) -> index
--- /dev/null
+! Test lowering of character function dummy procedures. The length must be
+! passed along with the function address.
+! RUN: bbc -emit-fir %s -o - | FileCheck %s
+
+! -----------------------------------------------------------------------------
+! Test passing a character function as dummy procedure
+! -----------------------------------------------------------------------------
+
+! CHECK-LABEL: func @_QPcst_len
+subroutine cst_len()
+ interface
+ character(7) function bar1()
+ end function
+ end interface
+ call foo1(bar1)
+ ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QPbar1) : (!fir.ref<!fir.char<1,7>>, index) -> !fir.boxchar<1>
+ ! CHECK: %[[VAL_1:.*]] = arith.constant 7 : i64
+ ! CHECK: %[[VAL_2:.*]] = fir.emboxproc %[[VAL_0]] : ((!fir.ref<!fir.char<1,7>>, index) -> !fir.boxchar<1>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[VAL_3:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_4:.*]] = fir.insert_value %[[VAL_3]], %[[VAL_2]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_5:.*]] = fir.insert_value %[[VAL_4]], %[[VAL_1]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: fir.call @_QPfoo1(%[[VAL_5]]) : (tuple<!fir.boxproc<() -> ()>, i64>) -> ()
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPcst_len_array
+ subroutine cst_len_array()
+ interface
+ function bar1_array()
+ character(7) :: bar1_array(10)
+ end function
+ end interface
+ ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QPbar1_array) : () -> !fir.array<10x!fir.char<1,7>>
+ ! CHECK: %[[VAL_1:.*]] = arith.constant 7 : i64
+ ! CHECK: %[[VAL_2:.*]] = fir.emboxproc %[[VAL_0]] : (() -> !fir.array<10x!fir.char<1,7>>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[VAL_3:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_4:.*]] = fir.insert_value %[[VAL_3]], %[[VAL_2]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_5:.*]] = fir.insert_value %[[VAL_4]], %[[VAL_1]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: fir.call @_QPfoo1b(%[[VAL_5]]) : (tuple<!fir.boxproc<() -> ()>, i64>) -> ()
+ call foo1b(bar1_array)
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPcst_len_2
+ subroutine cst_len_2()
+ character(7) :: bar2
+ external :: bar2
+ ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QPbar2) : (!fir.ref<!fir.char<1,7>>, index) -> !fir.boxchar<1>
+ ! CHECK: %[[VAL_1:.*]] = arith.constant 7 : i64
+ ! CHECK: %[[VAL_2:.*]] = fir.emboxproc %[[VAL_0]] : ((!fir.ref<!fir.char<1,7>>, index) -> !fir.boxchar<1>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[VAL_3:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_4:.*]] = fir.insert_value %[[VAL_3]], %[[VAL_2]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_5:.*]] = fir.insert_value %[[VAL_4]], %[[VAL_1]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: fir.call @_QPfoo2(%[[VAL_5]]) : (tuple<!fir.boxproc<() -> ()>, i64>) -> ()
+ call foo2(bar2)
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPdyn_len(
+ ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>{{.*}}) {
+ subroutine dyn_len(n)
+ integer :: n
+ character(n) :: bar3
+ external :: bar3
+ ! CHECK: %[[VAL_1:.*]] = fir.address_of(@_QPbar3) : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>
+ ! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
+ ! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_2]] : (i32) -> i64
+ ! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i64
+ ! CHECK: %[[VAL_5:.*]] = arith.cmpi sgt, %[[VAL_3]], %[[VAL_4]] : i64
+ ! CHECK: %[[VAL_6:.*]] = arith.select %[[VAL_5]], %[[VAL_3]], %[[VAL_4]] : i64
+ ! CHECK: %[[VAL_7:.*]] = fir.emboxproc %[[VAL_1]] : ((!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[VAL_8:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_9:.*]] = fir.insert_value %[[VAL_8]], %[[VAL_7]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_10:.*]] = fir.insert_value %[[VAL_9]], %[[VAL_6]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: fir.call @_QPfoo3(%[[VAL_10]]) : (tuple<!fir.boxproc<() -> ()>, i64>) -> ()
+ call foo3(bar3)
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPcannot_compute_len_yet
+ subroutine cannot_compute_len_yet()
+ interface
+ function bar4(n)
+ integer :: n
+ character(n) :: bar4
+ end function
+ end interface
+ ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QPbar4) : (!fir.ref<!fir.char<1,?>>, index, !fir.ref<i32>) -> !fir.boxchar<1>
+ ! CHECK: %[[VAL_1:.*]] = arith.constant -1 : index
+ ! CHECK: %[[VAL_2:.*]] = fir.emboxproc %[[VAL_0]] : ((!fir.ref<!fir.char<1,?>>, index, !fir.ref<i32>) -> !fir.boxchar<1>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (index) -> i64
+ ! CHECK: %[[VAL_4:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_5:.*]] = fir.insert_value %[[VAL_4]], %[[VAL_2]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_6:.*]] = fir.insert_value %[[VAL_5]], %[[VAL_3]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: fir.call @_QPfoo4(%[[VAL_6]]) : (tuple<!fir.boxproc<() -> ()>, i64>) -> ()
+ call foo4(bar4)
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPcannot_compute_len_yet_2
+ subroutine cannot_compute_len_yet_2()
+ character(*) :: bar5
+ external :: bar5
+ ! CHECK: %[[VAL_0:.*]] = fir.address_of(@_QPbar5) : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>
+ ! CHECK: %[[VAL_1:.*]] = arith.constant -1 : index
+ ! CHECK: %[[VAL_2:.*]] = fir.emboxproc %[[VAL_0]] : ((!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[VAL_3:.*]] = fir.convert %[[VAL_1]] : (index) -> i64
+ ! CHECK: %[[VAL_4:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_5:.*]] = fir.insert_value %[[VAL_4]], %[[VAL_2]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_6:.*]] = fir.insert_value %[[VAL_5]], %[[VAL_3]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: fir.call @_QPfoo5(%[[VAL_6]]) : (tuple<!fir.boxproc<() -> ()>, i64>) -> ()
+ call foo5(bar5)
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPforward_incoming_length
+ ! CHECK-SAME: %[[VAL_0:.*]]: tuple<!fir.boxproc<() -> ()>, i64> {fir.char_proc}) {
+ subroutine forward_incoming_length(bar6)
+ character(*) :: bar6
+ external :: bar6
+ ! CHECK: %[[VAL_1:.*]] = fir.extract_value %[[VAL_0]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[WAL_2:.*]] = fir.box_addr %[[VAL_1]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+ ! CHECK: %[[VAL_2:.*]] = fir.extract_value %[[VAL_0]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> i64
+ ! CHECK: %[[WAL_1:.*]] = fir.emboxproc %[[WAL_2]] : (() -> ()) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[VAL_3:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_4:.*]] = fir.insert_value %[[VAL_3]], %[[WAL_1]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_5:.*]] = fir.insert_value %[[VAL_4]], %[[VAL_2]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: fir.call @_QPfoo6(%[[VAL_5]]) : (tuple<!fir.boxproc<() -> ()>, i64>) -> ()
+ call foo6(bar6)
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPoverride_incoming_length
+ ! CHECK-SAME: %[[VAL_0:.*]]: tuple<!fir.boxproc<() -> ()>, i64> {fir.char_proc}) {
+ subroutine override_incoming_length(bar7)
+ character(7) :: bar7
+ external :: bar7
+ ! CHECK: %[[VAL_1:.*]] = fir.extract_value %[[VAL_0]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[WAL_2:.*]] = fir.box_addr %[[VAL_1]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+ ! CHECK: %[[VAL_2:.*]] = arith.constant 7 : i64
+ ! CHECK: %[[WAL_1:.*]] = fir.emboxproc %[[WAL_2]] : (() -> ()) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[VAL_3:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_4:.*]] = fir.insert_value %[[VAL_3]], %[[WAL_1]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: %[[VAL_5:.*]] = fir.insert_value %[[VAL_4]], %[[VAL_2]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+ ! CHECK: fir.call @_QPfoo7(%[[VAL_5]]) : (tuple<!fir.boxproc<() -> ()>, i64>) -> ()
+ call foo7(bar7)
+ end subroutine
+
+ ! -----------------------------------------------------------------------------
+ ! Test calling character dummy function
+ ! -----------------------------------------------------------------------------
+
+ ! CHECK-LABEL: func @_QPcall_assumed_length
+ ! CHECK-SAME: %[[VAL_0:.*]]: tuple<!fir.boxproc<() -> ()>, i64> {fir.char_proc}) {
+ subroutine call_assumed_length(bar8)
+ character(*) :: bar8
+ external :: bar8
+ ! CHECK: %[[VAL_3:.*]] = fir.extract_value %[[VAL_0]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[WAL_2:.*]] = fir.box_addr %[[VAL_3]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+ ! CHECK: %[[VAL_4:.*]] = fir.extract_value %[[VAL_0]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> i64
+ ! CHECK: %[[VAL_6:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_4]] : i64) {bindc_name = ".result"}
+ ! CHECK: %[[VAL_7:.*]] = fir.convert %[[WAL_2]] : (() -> ()) -> ((!fir.ref<!fir.char<1,?>>, index, !fir.ref<i32>) -> !fir.boxchar<1>)
+ ! CHECK: %[[VAL_8:.*]] = fir.convert %[[VAL_4]] : (i64) -> index
+ ! CHECK: fir.call %[[VAL_7]](%[[VAL_6]], %[[VAL_8]], %{{.*}}) : (!fir.ref<!fir.char<1,?>>, index, !fir.ref<i32>) -> !fir.boxchar<1>
+ call test(bar8(42))
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPcall_explicit_length
+ ! CHECK-SAME: %[[VAL_0:.*]]: tuple<!fir.boxproc<() -> ()>, i64> {fir.char_proc}) {
+ subroutine call_explicit_length(bar9)
+ character(7) :: bar9
+ external :: bar9
+ ! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.char<1,7> {bindc_name = ".result"}
+ ! CHECK: %[[VAL_4:.*]] = fir.extract_value %[[VAL_0]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[WAL_1:.*]] = fir.box_addr %[[VAL_4]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+ ! CHECK: %[[VAL_5:.*]] = arith.constant 7 : i64
+ ! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (i64) -> index
+ ! CHECK: %[[VAL_8:.*]] = fir.convert %[[WAL_1]] : (() -> ()) -> ((!fir.ref<!fir.char<1,7>>, index, !fir.ref<i32>) -> !fir.boxchar<1>)
+ ! CHECK: fir.call %[[VAL_8]](%[[VAL_1]], %[[VAL_6]], %{{.*}}) : (!fir.ref<!fir.char<1,7>>, index, !fir.ref<i32>) -> !fir.boxchar<1>
+ call test(bar9(42))
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPcall_explicit_length_with_iface
+ ! CHECK-SAME: %[[VAL_0:.*]]: tuple<!fir.boxproc<() -> ()>, i64> {fir.char_proc}) {
+ subroutine call_explicit_length_with_iface(bar10)
+ interface
+ function bar10(n)
+ integer(8) :: n
+ character(n) :: bar10
+ end function
+ end interface
+ ! CHECK: %[[VAL_1:.*]] = fir.alloca i64
+ ! CHECK: %[[VAL_2:.*]] = arith.constant 42 : i64
+ ! CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref<i64>
+ ! CHECK: %[[VAL_3:.*]] = fir.extract_value %[[VAL_0]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[WAL_1:.*]] = fir.box_addr %[[VAL_3]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+ ! CHECK: %[[VAL_4:.*]] = fir.load %[[VAL_1]] : !fir.ref<i64>
+ ! CHECK: %[[VAL_5:.*]] = fir.convert %[[VAL_4]] : (i64) -> index
+ ! CHECK: %[[VAL_6:.*]] = fir.call @llvm.stacksave() : () -> !fir.ref<i8>
+ ! CHECK: %[[VAL_7:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_5]] : index) {bindc_name = ".result"}
+ ! CHECK: %[[VAL_8:.*]] = fir.convert %[[WAL_1]] : (() -> ()) -> ((!fir.ref<!fir.char<1,?>>, index, !fir.ref<i64>) -> !fir.boxchar<1>)
+ ! CHECK: fir.call %[[VAL_8]](%[[VAL_7]], %[[VAL_5]], %[[VAL_1]]) : (!fir.ref<!fir.char<1,?>>, index, !fir.ref<i64>) -> !fir.boxchar<1>
+ call test(bar10(42_8))
+ end subroutine
+
+
+ ! Test that the assumed character length of dummy procedure f is captured in
+ ! the host-association tuple and retrieved inside the internal procedure,
+ ! where it is used to allocate the function result storage.
+ ! CHECK-LABEL: func @_QPhost(
+ ! CHECK-SAME: %[[VAL_0:.*]]: tuple<!fir.boxproc<() -> ()>, i64>
+ subroutine host(f)
+ character*(*) :: f
+ external :: f
+ ! CHECK: %[[VAL_3:.*]] = fir.coordinate_of %[[VAL_1:.*]], %{{.*}} : (!fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>>, i32) -> !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
+ ! CHECK: fir.store %[[VAL_0]] to %[[VAL_3]] : !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
+ ! CHECK: fir.call @_QFhostPintern(%[[VAL_1]])
+ call intern()
+ contains
+ ! CHECK-LABEL: func @_QFhostPintern(
+ ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>> {fir.host_assoc})
+ subroutine intern()
+ ! CHECK: %[[VAL_1:.*]] = arith.constant 0 : i32
+ ! CHECK: %[[VAL_2:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_1]] : (!fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>>, i32) -> !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
+ ! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
+ ! CHECK: %[[VAL_4:.*]] = fir.extract_value %[[VAL_3]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[WAL_1:.*]] = fir.box_addr %[[VAL_4]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+ ! CHECK: %[[VAL_5:.*]] = fir.extract_value %[[VAL_3]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> i64
+ ! CHECK: %[[VAL_7:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_5]] : i64) {bindc_name = ".result"}
+ ! CHECK: %[[VAL_8:.*]] = fir.convert %[[WAL_1]] : (() -> ()) -> ((!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>)
+ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_5]] : (i64) -> index
+ ! CHECK: fir.call %[[VAL_8]](%[[VAL_7]], %[[VAL_9]]) : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>
+ call test(f())
+ end subroutine
+ end subroutine
+
+ ! CHECK-LABEL: func @_QPhost2(
+ ! CHECK-SAME: %[[VAL_0:.*]]: tuple<!fir.boxproc<() -> ()>, i64> {fir.char_proc})
+ subroutine host2(f)
+ ! Test that dummy length is overridden by local length even when used
+ ! in the internal procedure.
+ character*(42) :: f
+ external :: f
+ ! CHECK: %[[VAL_3:.*]] = fir.coordinate_of %[[VAL_1:.*]], %{{.*}} : (!fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>>, i32) -> !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
+ ! CHECK: fir.store %[[VAL_0]] to %[[VAL_3]] : !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
+ ! CHECK: fir.call @_QFhost2Pintern(%[[VAL_1]])
+ call intern()
+ contains
+ ! CHECK-LABEL: func @_QFhost2Pintern(
+ ! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>> {fir.host_assoc})
+ subroutine intern()
+ ! CHECK: %[[VAL_1:.*]] = fir.alloca !fir.char<1,42> {bindc_name = ".result"}
+ ! CHECK: %[[VAL_2:.*]] = arith.constant 0 : i32
+ ! CHECK: %[[VAL_3:.*]] = fir.coordinate_of %[[VAL_0]], %[[VAL_2]] : (!fir.ref<tuple<tuple<!fir.boxproc<() -> ()>, i64>>>, i32) -> !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
+ ! CHECK: %[[VAL_4:.*]] = fir.load %[[VAL_3]] : !fir.ref<tuple<!fir.boxproc<() -> ()>, i64>>
+ ! CHECK: %[[VAL_5:.*]] = fir.extract_value %[[VAL_4]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()>
+ ! CHECK: %[[WAL_1:.*]] = fir.box_addr %[[VAL_5]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+ ! CHECK: %[[VAL_6:.*]] = arith.constant 42 : i64
+ ! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]] : (i64) -> index
+ ! CHECK: %[[VAL_9:.*]] = fir.convert %[[WAL_1]] : (() -> ()) -> ((!fir.ref<!fir.char<1,42>>, index) -> !fir.boxchar<1>)
+ ! CHECK: fir.call %[[VAL_9]](%[[VAL_1]], %[[VAL_7]]) : (!fir.ref<!fir.char<1,42>>, index) -> !fir.boxchar<1>
+ call test(f())
+ end subroutine
+ end subroutine
--- /dev/null
+! RUN: bbc -emit-fir %s -o - | FileCheck %s
+
+! Test dummy procedures
+
+! Test of dummy procedure call
+! CHECK-LABEL: func @_QPfoo(
+! CHECK-SAME: %{{.*}}: !fir.boxproc<() -> ()>{{.*}}) -> f32
+real function foo(bar)
+real :: bar, x
+! CHECK: %[[x:.*]] = fir.alloca f32 {{{.*}}uniq_name = "{{.*}}Ex"}
+x = 42.
+! CHECK: %[[funccast:.*]] = fir.box_addr %arg0 : (!fir.boxproc<() -> ()>) -> ((!fir.ref<f32>) -> f32)
+! CHECK: fir.call %[[funccast]](%[[x]]) : (!fir.ref<f32>) -> f32
+foo = bar(x)
+end function
+
+! Test case where the dummy procedure is simply passed through unchanged.
+! CHECK-LABEL: func @_QPprefoo(
+! CHECK-SAME: %{{.*}}: !fir.boxproc<() -> ()>{{.*}}) -> f32
+real function prefoo(bar)
+external :: bar
+! CHECK: fir.call @_QPfoo(%arg0) : (!fir.boxproc<() -> ()>) -> f32
+prefoo = foo(bar)
+end function
+
+! Function that will be passed as dummy argument
+! CHECK-LABEL: func @_QPfunc(
+! CHECK-SAME: %{{.*}}: !fir.ref<f32>{{.*}}) -> f32
+real function func(x)
+real :: x
+func = x + 0.5
+end function
+
+! Test passing functions as dummy procedure arguments
+! CHECK-LABEL: func @_QPtest_func
+real function test_func()
+real :: func, prefoo
+external :: func
+!CHECK: %[[f:.*]] = fir.address_of(@_QPfunc) : (!fir.ref<f32>) -> f32
+!CHECK: %[[fcast:.*]] = fir.emboxproc %[[f]] : ((!fir.ref<f32>) -> f32) -> !fir.boxproc<() -> ()>
+!CHECK: fir.call @_QPprefoo(%[[fcast]]) : (!fir.boxproc<() -> ()>) -> f32
+test_func = prefoo(func)
+end function
+
+! Repeat test with dummy subroutine
+
+! CHECK-LABEL: func @_QPfoo_sub(
+! CHECK-SAME: %{{.*}}: !fir.boxproc<() -> ()>{{.*}})
+subroutine foo_sub(bar_sub)
+! CHECK: %[[x:.*]] = fir.alloca f32 {{{.*}}uniq_name = "{{.*}}Ex"}
+x = 42.
+! CHECK: %[[funccast:.*]] = fir.box_addr %arg0 : (!fir.boxproc<() -> ()>) -> ((!fir.ref<f32>) -> ())
+! CHECK: fir.call %[[funccast]](%[[x]]) : (!fir.ref<f32>)
+call bar_sub(x)
+end subroutine
+
+! Test case where the dummy procedure is simply passed through unchanged.
+! CHECK-LABEL: func @_QPprefoo_sub(
+! CHECK-SAME: %{{.*}}: !fir.boxproc<() -> ()>{{.*}})
+subroutine prefoo_sub(bar_sub)
+external :: bar_sub
+! CHECK: fir.call @_QPfoo_sub(%arg0) : (!fir.boxproc<() -> ()>) -> ()
+call foo_sub(bar_sub)
+end subroutine
+
+! Subroutine that will be passed as dummy argument
+! CHECK-LABEL: func @_QPsub(
+! CHECK-SAME: %{{.*}}: !fir.ref<f32>{{.*}})
+subroutine sub(x)
+real :: x
+print *, x
+end subroutine
+
+! Test passing subroutines as dummy procedure arguments
+! CHECK-LABEL: func @_QPtest_sub
+subroutine test_sub()
+external :: sub
+!CHECK: %[[f:.*]] = fir.address_of(@_QPsub) : (!fir.ref<f32>) -> ()
+!CHECK: %[[fcast:.*]] = fir.emboxproc %[[f]] : ((!fir.ref<f32>) -> ()) -> !fir.boxproc<() -> ()>
+!CHECK: fir.call @_QPprefoo_sub(%[[fcast]]) : (!fir.boxproc<() -> ()>) -> ()
+call prefoo_sub(sub)
+end subroutine
+
+! Test passing an external procedure that has no definition in this file:
+! its address is still materialized and boxed for the call.
+! CHECK-LABEL: func @_QPpassing_not_defined_in_file()
+subroutine passing_not_defined_in_file()
+external proc_not_defined_in_file
+! CHECK: %[[addr:.*]] = fir.address_of(@_QPproc_not_defined_in_file) : () -> ()
+! CHECK: %[[ep:.*]] = fir.emboxproc %[[addr]]
+! CHECK: fir.call @_QPprefoo_sub(%[[ep]]) : (!fir.boxproc<() -> ()>) -> ()
+call prefoo_sub(proc_not_defined_in_file)
+end subroutine
+
+! Test passing unrestricted intrinsics
+
+! Intrinsic using runtime
+! CHECK-LABEL: func @_QPtest_acos
+subroutine test_acos(x)
+intrinsic :: acos
+!CHECK: %[[f:.*]] = fir.address_of(@fir.acos.f32.ref_f32) : (!fir.ref<f32>) -> f32
+!CHECK: %[[fcast:.*]] = fir.emboxproc %[[f]] : ((!fir.ref<f32>) -> f32) -> !fir.boxproc<() -> ()>
+!CHECK: fir.call @_QPfoo_acos(%[[fcast]]) : (!fir.boxproc<() -> ()>) -> ()
+call foo_acos(acos)
+end subroutine
+
+! CHECK-LABEL: func @_QPtest_atan2
+subroutine test_atan2()
+intrinsic :: atan2
+! CHECK: %[[f:.*]] = fir.address_of(@fir.atan2.f32.ref_f32.ref_f32) : (!fir.ref<f32>, !fir.ref<f32>) -> f32
+! CHECK: %[[fcast:.*]] = fir.emboxproc %[[f]] : ((!fir.ref<f32>, !fir.ref<f32>) -> f32) -> !fir.boxproc<() -> ()>
+! CHECK: fir.call @_QPfoo_atan2(%[[fcast]]) : (!fir.boxproc<() -> ()>) -> ()
+call foo_atan2(atan2)
+end subroutine
+
+! Intrinsic implemented inline
+! CHECK-LABEL: func @_QPtest_aimag
+subroutine test_aimag()
+intrinsic :: aimag
+!CHECK: %[[f:.*]] = fir.address_of(@fir.aimag.f32.ref_z4) : (!fir.ref<!fir.complex<4>>) -> f32
+!CHECK: %[[fcast:.*]] = fir.emboxproc %[[f]] : ((!fir.ref<!fir.complex<4>>) -> f32) -> !fir.boxproc<() -> ()>
+!CHECK: fir.call @_QPfoo_aimag(%[[fcast]]) : (!fir.boxproc<() -> ()>) -> ()
+call foo_aimag(aimag)
+end subroutine
+
+! Character intrinsic implemented inline
+! CHECK-LABEL: func @_QPtest_len
+subroutine test_len()
+intrinsic :: len
+! CHECK: %[[f:.*]] = fir.address_of(@fir.len.i32.bc1) : (!fir.boxchar<1>) -> i32
+! CHECK: %[[fcast:.*]] = fir.emboxproc %[[f]] : ((!fir.boxchar<1>) -> i32) -> !fir.boxproc<() -> ()>
+!CHECK: fir.call @_QPfoo_len(%[[fcast]]) : (!fir.boxproc<() -> ()>) -> ()
+call foo_len(len)
+end subroutine
+
+! Intrinsic implemented inline with a specific name different from the generic
+! CHECK-LABEL: func @_QPtest_iabs
+subroutine test_iabs()
+intrinsic :: iabs
+! CHECK: %[[f:.*]] = fir.address_of(@fir.abs.i32.ref_i32) : (!fir.ref<i32>) -> i32
+! CHECK: %[[fcast:.*]] = fir.emboxproc %[[f]] : ((!fir.ref<i32>) -> i32) -> !fir.boxproc<() -> ()>
+! CHECK: fir.call @_QPfoo_iabs(%[[fcast]]) : (!fir.boxproc<() -> ()>) -> ()
+call foo_iabs(iabs)
+end subroutine
+
+! TODO: exhaustive test of unrestricted intrinsic table 16.2
+
+! TODO: improve dummy procedure types when interface is given.
+! CHECK: func @_QPtodo3(
+! CHECK-SAME: %{{.*}}: !fir.boxproc<() -> ()>{{.*}})
+! SHOULD-CHECK: func @_QPtodo3(%arg0: (!fir.ref<f32>) -> f32)
+subroutine todo3(dummy_proc)
+intrinsic :: acos
+procedure(acos) :: dummy_proc
+end subroutine
+
+! CHECK-LABEL: func private @fir.acos.f32.ref_f32(%arg0: !fir.ref<f32>) -> f32
+!CHECK: %[[load:.*]] = fir.load %arg0
+!CHECK: %[[res:.*]] = fir.call @__fs_acos_1(%[[load]]) : (f32) -> f32
+!CHECK: return %[[res]] : f32
+
+! CHECK-LABEL: func private @fir.atan2.f32.ref_f32.ref_f32(
+! CHECK-SAME: %[[x:.*]]: !fir.ref<f32>, %[[y:.*]]: !fir.ref<f32>) -> f32
+! CHECK-DAG: %[[xload:.*]] = fir.load %[[x]] : !fir.ref<f32>
+! CHECK-DAG: %[[yload:.*]] = fir.load %[[y]] : !fir.ref<f32>
+! CHECK: %[[atan2:.*]] = fir.call @__fs_atan2_1(%[[xload]], %[[yload]]) : (f32, f32) -> f32
+! CHECK: return %[[atan2]] : f32
+
+!CHECK-LABEL: func private @fir.aimag.f32.ref_z4(%arg0: !fir.ref<!fir.complex<4>>)
+!CHECK: %[[load:.*]] = fir.load %arg0
+!CHECK: %[[imag:.*]] = fir.extract_value %[[load]], [1 : index] : (!fir.complex<4>) -> f32
+!CHECK: return %[[imag]] : f32
+
+!CHECK-LABEL: func private @fir.len.i32.bc1(%arg0: !fir.boxchar<1>)
+!CHECK: %[[unboxed:.*]]:2 = fir.unboxchar %arg0 : (!fir.boxchar<1>) -> (!fir.ref<!fir.char<1,?>>, index)
+!CHECK: %[[len:.*]] = fir.convert %[[unboxed]]#1 : (index) -> i32
+!CHECK: return %[[len]] : i32
! Test internal procedure host association lowering.
-! RUN: bbc %s -o - -emit-fir | FileCheck %s
+! RUN: bbc %s -o - | FileCheck %s
! -----------------------------------------------------------------------------
! Test non character intrinsic scalars
c = "Hi there"
end subroutine test6_inner
end subroutine test6
+
+! -----------------------------------------------------------------------------
+! Test arrays that are neither allocatable nor pointer
+! -----------------------------------------------------------------------------
+
+! CHECK-LABEL: func @_QPtest3(
+! CHECK-SAME: %[[p:[^:]+]]: !fir.box<!fir.array<?xf32>>{{.*}}, %[[q:.*]]: !fir.box<!fir.array<?xf32>>{{.*}}, %[[i:.*]]: !fir.ref<i64>
+subroutine test3(p,q,i)
+ integer(8) :: i
+ real :: p(i:)
+ real :: q(:)
+ ! CHECK: %[[iload:.*]] = fir.load %[[i]] : !fir.ref<i64>
+ ! CHECK: %[[icast:.*]] = fir.convert %[[iload]] : (i64) -> index
+ ! CHECK: %[[tup:.*]] = fir.alloca tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>
+ ! CHECK: %[[ptup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<?xf32>>>
+ ! CHECK: %[[pshift:.*]] = fir.shift %[[icast]] : (index) -> !fir.shift<1>
+ ! CHECK: %[[pbox:.*]] = fir.rebox %[[p]](%[[pshift]]) : (!fir.box<!fir.array<?xf32>>, !fir.shift<1>) -> !fir.box<!fir.array<?xf32>>
+ ! CHECK: fir.store %[[pbox]] to %[[ptup]] : !fir.ref<!fir.box<!fir.array<?xf32>>>
+ ! CHECK: %[[qtup:.*]] = fir.coordinate_of %[[tup]], %c1{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<?xf32>>>
+ ! CHECK: %[[qbox:.*]] = fir.rebox %[[q]] : (!fir.box<!fir.array<?xf32>>) -> !fir.box<!fir.array<?xf32>>
+ ! CHECK: fir.store %[[qbox]] to %[[qtup]] : !fir.ref<!fir.box<!fir.array<?xf32>>>
+
+ i = i + 1
+ q = -42.0
+
+ ! CHECK: fir.call @_QFtest3Ptest3_inner(%[[tup]]) : (!fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>>) -> ()
+ call test3_inner
+
+ if (p(2) .ne. -42.0) then
+ print *, "failed"
+ end if
+
+contains
+ ! CHECK-LABEL: func @_QFtest3Ptest3_inner(
+ ! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>> {fir.host_assoc}) {
+ subroutine test3_inner
+ ! CHECK: %[[pcoor:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<?xf32>>>
+ ! CHECK: %[[p:.*]] = fir.load %[[pcoor]] : !fir.ref<!fir.box<!fir.array<?xf32>>>
+ ! CHECK: %[[pbounds:.]]:3 = fir.box_dims %[[p]], %c0{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
+ ! CHECK: %[[qcoor:.*]] = fir.coordinate_of %[[tup]], %c1{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<?xf32>>, !fir.box<!fir.array<?xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<?xf32>>>
+ ! CHECK: %[[q:.*]] = fir.load %[[qcoor]] : !fir.ref<!fir.box<!fir.array<?xf32>>>
+ ! CHECK: %[[qbounds:.]]:3 = fir.box_dims %[[q]], %c0{{.*}} : (!fir.box<!fir.array<?xf32>>, index) -> (index, index, index)
+
+
+ ! CHECK: %[[qlb:.*]] = fir.convert %[[qbounds]]#0 : (index) -> i64
+ ! CHECK: %[[qoffset:.*]] = arith.subi %c1{{.*}}, %[[qlb]] : i64
+ ! CHECK: %[[qelt:.*]] = fir.coordinate_of %[[q]], %[[qoffset]] : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
+ ! CHECK: %[[qload:.*]] = fir.load %[[qelt]] : !fir.ref<f32>
+ ! CHECK: %[[plb:.*]] = fir.convert %[[pbounds]]#0 : (index) -> i64
+ ! CHECK: %[[poffset:.*]] = arith.subi %c2{{.*}}, %[[plb]] : i64
+ ! CHECK: %[[pelt:.*]] = fir.coordinate_of %[[p]], %[[poffset]] : (!fir.box<!fir.array<?xf32>>, i64) -> !fir.ref<f32>
+ ! CHECK: fir.store %[[qload]] to %[[pelt]] : !fir.ref<f32>
+ p(2) = q(1)
+ end subroutine test3_inner
+end subroutine test3
+
+! CHECK-LABEL: func @_QPtest3a(
+! CHECK-SAME: %[[p:.*]]: !fir.ref<!fir.array<10xf32>>{{.*}}) {
+subroutine test3a(p)
+ real :: p(10)
+ real :: q(10)
+ ! CHECK: %[[q:.*]] = fir.alloca !fir.array<10xf32> {bindc_name = "q", uniq_name = "_QFtest3aEq"}
+ ! CHECK: %[[tup:.*]] = fir.alloca tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>
+ ! CHECK: %[[ptup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<10xf32>>>
+ ! CHECK: %[[shape:.*]] = fir.shape %c10{{.*}} : (index) -> !fir.shape<1>
+ ! CHECK: %[[pbox:.*]] = fir.embox %[[p]](%[[shape]]) : (!fir.ref<!fir.array<10xf32>>, !fir.shape<1>) -> !fir.box<!fir.array<10xf32>>
+ ! CHECK: fir.store %[[pbox]] to %[[ptup]] : !fir.ref<!fir.box<!fir.array<10xf32>>>
+ ! CHECK: %[[qtup:.*]] = fir.coordinate_of %[[tup]], %c1{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<10xf32>>>
+ ! CHECK: %[[qbox:.*]] = fir.embox %[[q]](%[[shape]]) : (!fir.ref<!fir.array<10xf32>>, !fir.shape<1>) -> !fir.box<!fir.array<10xf32>>
+ ! CHECK: fir.store %[[qbox]] to %[[qtup]] : !fir.ref<!fir.box<!fir.array<10xf32>>>
+
+ q = -42.0
+ ! CHECK: fir.call @_QFtest3aPtest3a_inner(%[[tup]]) : (!fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>>) -> ()
+ call test3a_inner
+
+ if (p(1) .ne. -42.0) then
+ print *, "failed"
+ end if
+
+contains
+ ! CHECK: func @_QFtest3aPtest3a_inner(
+ ! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>> {fir.host_assoc}) {
+ subroutine test3a_inner
+ ! CHECK: %[[pcoor:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<10xf32>>>
+ ! CHECK: %[[p:.*]] = fir.load %[[pcoor]] : !fir.ref<!fir.box<!fir.array<10xf32>>>
+ ! CHECK: %[[paddr:.*]] = fir.box_addr %[[p]] : (!fir.box<!fir.array<10xf32>>) -> !fir.ref<!fir.array<10xf32>>
+ ! CHECK: %[[qcoor:.*]] = fir.coordinate_of %[[tup]], %c1{{.*}} : (!fir.ref<tuple<!fir.box<!fir.array<10xf32>>, !fir.box<!fir.array<10xf32>>>>, i32) -> !fir.ref<!fir.box<!fir.array<10xf32>>>
+ ! CHECK: %[[q:.*]] = fir.load %[[qcoor]] : !fir.ref<!fir.box<!fir.array<10xf32>>>
+ ! CHECK: %[[qaddr:.*]] = fir.box_addr %[[q]] : (!fir.box<!fir.array<10xf32>>) -> !fir.ref<!fir.array<10xf32>>
+
+ ! CHECK: %[[qelt:.*]] = fir.coordinate_of %[[qaddr]], %c0{{.*}} : (!fir.ref<!fir.array<10xf32>>, i64) -> !fir.ref<f32>
+ ! CHECK: %[[qload:.*]] = fir.load %[[qelt]] : !fir.ref<f32>
+ ! CHECK: %[[pelt:.*]] = fir.coordinate_of %[[paddr]], %c0{{.*}} : (!fir.ref<!fir.array<10xf32>>, i64) -> !fir.ref<f32>
+ ! CHECK: fir.store %[[qload]] to %[[pelt]] : !fir.ref<f32>
+ p(1) = q(1)
+ end subroutine test3a_inner
+end subroutine test3a
+
+! -----------------------------------------------------------------------------
+! Test allocatable and pointer scalars
+! -----------------------------------------------------------------------------
+
+! CHECK-LABEL: func @_QPtest4() {
+subroutine test4
+ real, pointer :: p
+ real, allocatable, target :: ally
+ ! CHECK: %[[ally:.*]] = fir.alloca !fir.box<!fir.heap<f32>> {bindc_name = "ally", fir.target, uniq_name = "_QFtest4Eally"}
+ ! CHECK: %[[p:.*]] = fir.alloca !fir.box<!fir.ptr<f32>> {bindc_name = "p", uniq_name = "_QFtest4Ep"}
+ ! CHECK: %[[tup:.*]] = fir.alloca tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>
+ ! CHECK: %[[ptup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<f32>>>>
+ ! CHECK: fir.store %[[p]] to %[[ptup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<f32>>>>
+ ! CHECK: %[[atup:.*]] = fir.coordinate_of %[[tup]], %c1{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.heap<f32>>>>
+ ! CHECK: fir.store %[[ally]] to %[[atup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.heap<f32>>>>
+ ! CHECK: fir.call @_QFtest4Ptest4_inner(%[[tup]]) : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>>) -> ()
+
+ allocate(ally)
+ ally = -42.0
+ call test4_inner
+
+ if (p .ne. -42.0) then
+ print *, "failed"
+ end if
+
+contains
+ ! CHECK-LABEL: func @_QFtest4Ptest4_inner(
+ ! CHECK-SAME:%[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>> {fir.host_assoc}) {
+ subroutine test4_inner
+ ! CHECK: %[[ptup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<f32>>>>
+ ! CHECK: %[[p:.*]] = fir.load %[[ptup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<f32>>>>
+ ! CHECK: %[[atup:.*]] = fir.coordinate_of %[[tup]], %c1{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.heap<f32>>>>
+ ! CHECK: %[[a:.*]] = fir.load %[[atup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.heap<f32>>>>
+ ! CHECK: %[[abox:.*]] = fir.load %[[a]] : !fir.ref<!fir.box<!fir.heap<f32>>>
+ ! CHECK: %[[addr:.*]] = fir.box_addr %[[abox]] : (!fir.box<!fir.heap<f32>>) -> !fir.heap<f32>
+ ! CHECK: %[[ptr:.*]] = fir.embox %[[addr]] : (!fir.heap<f32>) -> !fir.box<!fir.ptr<f32>>
+ ! CHECK: fir.store %[[ptr]] to %[[p]] : !fir.ref<!fir.box<!fir.ptr<f32>>>
+ p => ally
+ end subroutine test4_inner
+end subroutine test4
+
+! -----------------------------------------------------------------------------
+! Test allocatable and pointer arrays
+! -----------------------------------------------------------------------------
+
+! CHECK-LABEL: func @_QPtest5() {
+subroutine test5
+ real, pointer :: p(:)
+ real, allocatable, target :: ally(:)
+
+ ! CHECK: %[[ally:.*]] = fir.alloca !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "ally", fir.target
+ ! CHECK: %[[p:.*]] = fir.alloca !fir.box<!fir.ptr<!fir.array<?xf32>>> {bindc_name = "p"
+ ! CHECK: %[[tup:.*]] = fir.alloca tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>
+ ! CHECK: %[[ptup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>>
+ ! CHECK: fir.store %[[p]] to %[[ptup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>>
+ ! CHECK: %[[atup:.*]] = fir.coordinate_of %[[tup]], %c1{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>
+ ! CHECK: fir.store %[[ally]] to %[[atup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>
+ ! CHECK: fir.call @_QFtest5Ptest5_inner(%[[tup]]) : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>>) -> ()
+
+ allocate(ally(10))
+ ally = -42.0
+ call test5_inner
+
+ if (p(1) .ne. -42.0) then
+ print *, "failed"
+ end if
+
+contains
+ ! CHECK-LABEL: func @_QFtest5Ptest5_inner(
+ ! CHECK-SAME:%[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>> {fir.host_assoc}) {
+ subroutine test5_inner
+ ! CHECK: %[[ptup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>>
+ ! CHECK: %[[p:.*]] = fir.load %[[ptup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>>
+ ! CHECK: %[[atup:.*]] = fir.coordinate_of %[[tup]], %c1{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>
+ ! CHECK: %[[a:.*]] = fir.load %[[atup]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>>
+ ! CHECK: %[[abox:.*]] = fir.load %[[a]] : !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+ ! CHECK-DAG: %[[adims:.*]]:3 = fir.box_dims %[[abox]], %c0{{.*}} : (!fir.box<!fir.heap<!fir.array<?xf32>>>, index) -> (index, index, index)
+ ! CHECK-DAG: %[[addr:.*]] = fir.box_addr %[[abox]] : (!fir.box<!fir.heap<!fir.array<?xf32>>>) -> !fir.heap<!fir.array<?xf32>>
+ ! CHECK-DAG: %[[ashape:.*]] = fir.shape_shift %[[adims]]#0, %[[adims]]#1 : (index, index) -> !fir.shapeshift<1>
+
+ ! CHECK: %[[ptr:.*]] = fir.embox %[[addr]](%[[ashape]]) : (!fir.heap<!fir.array<?xf32>>, !fir.shapeshift<1>) -> !fir.box<!fir.ptr<!fir.array<?xf32>>>
+ ! CHECK: fir.store %[[ptr]] to %[[p]] : !fir.ref<!fir.box<!fir.ptr<!fir.array<?xf32>>>>
+ p => ally
+ end subroutine test5_inner
+end subroutine test5
+
+
+! -----------------------------------------------------------------------------
+! Test elemental internal procedure
+! -----------------------------------------------------------------------------
+
+! CHECK-LABEL: func @_QPtest7(
+! CHECK-SAME: %[[j:.*]]: !fir.ref<i32>{{.*}}, %[[k:.*]]: !fir.box<!fir.array<?xi32>>
+subroutine test7(j, k)
+ implicit none
+ integer :: j
+ integer :: k(:)
+ ! CHECK: %[[tup:.*]] = fir.alloca tuple<!fir.ref<i32>>
+ ! CHECK: %[[jtup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<i32>>>, i32) -> !fir.llvm_ptr<!fir.ref<i32>>
+ ! CHECK: fir.store %[[j]] to %[[jtup]] : !fir.llvm_ptr<!fir.ref<i32>>
+
+ ! CHECK: %[[kelem:.*]] = fir.array_coor %[[k]] %{{.*}} : (!fir.box<!fir.array<?xi32>>, index) -> !fir.ref<i32>
+ ! CHECK: fir.call @_QFtest7Ptest7_inner(%[[kelem]], %[[tup]]) : (!fir.ref<i32>, !fir.ref<tuple<!fir.ref<i32>>>) -> i32
+ k = test7_inner(k)
+contains
+
+! CHECK-LABEL: func @_QFtest7Ptest7_inner(
+! CHECK-SAME: %[[i:.*]]: !fir.ref<i32>{{.*}}, %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) -> i32 {
+elemental integer function test7_inner(i)
+ implicit none
+ integer, intent(in) :: i
+ ! CHECK: %[[jtup:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<i32>>>, i32) -> !fir.llvm_ptr<!fir.ref<i32>>
+ ! CHECK: %[[jptr:.*]] = fir.load %[[jtup]] : !fir.llvm_ptr<!fir.ref<i32>>
+ ! CHECK-DAG: %[[iload:.*]] = fir.load %[[i]] : !fir.ref<i32>
+ ! CHECK-DAG: %[[jload:.*]] = fir.load %[[jptr]] : !fir.ref<i32>
+ ! CHECK: addi %[[iload]], %[[jload]] : i32
+ test7_inner = i + j
+end function
+end subroutine
+
+subroutine issue990()
+ ! Test that host symbols used in statement functions inside an internal
+ ! procedure are correctly captured from the host.
+ implicit none
+ integer :: captured
+ call bar()
+contains
+! CHECK-LABEL: func @_QFissue990Pbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) {
+subroutine bar()
+ integer :: stmt_func, i
+ stmt_func(i) = i + captured
+ ! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<i32>>>, i32) -> !fir.llvm_ptr<!fir.ref<i32>>
+ ! CHECK: %[[addr:.*]] = fir.load %[[tupAddr]] : !fir.llvm_ptr<!fir.ref<i32>>
+ ! CHECK: %[[value:.*]] = fir.load %[[addr]] : !fir.ref<i32>
+ ! CHECK: arith.addi %{{.*}}, %[[value]] : i32
+ print *, stmt_func(10)
+end subroutine
+end subroutine
+
+subroutine issue990b()
+ ! Test when an internal procedure uses a statement function from its host
+ ! which uses host variables that are otherwise not used by the internal
+ ! procedure.
+ implicit none
+ integer :: captured, captured_stmt_func, i
+ captured_stmt_func(i) = i + captured
+ call bar()
+contains
+! CHECK-LABEL: func @_QFissue990bPbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) {
+subroutine bar()
+ ! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<i32>>>, i32) -> !fir.llvm_ptr<!fir.ref<i32>>
+ ! CHECK: %[[addr:.*]] = fir.load %[[tupAddr]] : !fir.llvm_ptr<!fir.ref<i32>>
+ ! CHECK: %[[value:.*]] = fir.load %[[addr]] : !fir.ref<i32>
+ ! CHECK: arith.addi %{{.*}}, %[[value]] : i32
+ print *, captured_stmt_func(10)
+end subroutine
+end subroutine
+
+! Test capture of dummy procedures that are functions.
+subroutine test8(dummy_proc)
+ implicit none
+ interface
+ real function dummy_proc(x)
+ real :: x
+ end function
+ end interface
+ call bar()
+contains
+! CHECK-LABEL: func @_QFtest8Pbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.boxproc<() -> ()>>> {fir.host_assoc}) {
+subroutine bar()
+ ! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.boxproc<() -> ()>>>, i32) -> !fir.ref<!fir.boxproc<() -> ()>>
+ ! CHECK: %[[dummyProc:.*]] = fir.load %[[tupAddr]] : !fir.ref<!fir.boxproc<() -> ()>>
+ ! CHECK: %[[dummyProcCast:.*]] = fir.box_addr %[[dummyProc]] : (!fir.boxproc<() -> ()>) -> ((!fir.ref<f32>) -> f32)
+ ! CHECK: fir.call %[[dummyProcCast]](%{{.*}}) : (!fir.ref<f32>) -> f32
+ print *, dummy_proc(42.)
+end subroutine
+end subroutine
+
+! Test capture of dummy subroutines.
+subroutine test9(dummy_proc)
+ implicit none
+ interface
+ subroutine dummy_proc()
+ end subroutine
+ end interface
+ call bar()
+contains
+! CHECK-LABEL: func @_QFtest9Pbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.boxproc<() -> ()>>> {fir.host_assoc}) {
+subroutine bar()
+ ! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.boxproc<() -> ()>>>, i32) -> !fir.ref<!fir.boxproc<() -> ()>>
+ ! CHECK: %[[dummyProc:.*]] = fir.load %[[tupAddr]] : !fir.ref<!fir.boxproc<() -> ()>>
+ ! CHECK: %[[pa:.*]] = fir.box_addr %[[dummyProc]]
+ ! CHECK: fir.call %[[pa]]() : () -> ()
+ call dummy_proc()
+end subroutine
+end subroutine
+
+! Test capture of a host variable referenced through a namelist group.
+! CHECK-LABEL: func @_QPtest10(
+! CHECK-SAME: %[[i:.*]]: !fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>{{.*}}) {
+subroutine test10(i)
+ implicit none
+ integer, pointer :: i(:)
+ namelist /a_namelist/ i
+ ! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup:.*]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>
+ ! CHECK: fir.store %[[i]] to %[[tupAddr]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>
+ ! CHECK: fir.call @_QFtest10Pbar(%[[tup]]) : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>>) -> ()
+ call bar()
+contains
+! CHECK-LABEL: func @_QFtest10Pbar(
+! CHECK-SAME: %[[tup:.*]]: !fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>> {fir.host_assoc}) {
+subroutine bar()
+ ! CHECK: %[[tupAddr:.*]] = fir.coordinate_of %[[tup]], %c0{{.*}} : (!fir.ref<tuple<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>>, i32) -> !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>
+ ! CHECK: fir.load %[[tupAddr]] : !fir.llvm_ptr<!fir.ref<!fir.box<!fir.ptr<!fir.array<?xi32>>>>>
+ read (88, NML = a_namelist)
+end subroutine
+end subroutine
+
+! Test passing an internal procedure as a dummy argument.
+
+! CHECK-LABEL: func @_QPtest_proc_dummy() {
+! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFtest_proc_dummyEi"}
+! CHECK: %[[VAL_5:.*]] = fir.alloca tuple<!fir.ref<i32>>
+! CHECK: %[[VAL_7:.*]] = fir.address_of(@_QFtest_proc_dummyPtest_proc_dummy_a) : (!fir.ref<i32>, !fir.ref<tuple<!fir.ref<i32>>>) -> ()
+! CHECK: %[[VAL_8:.*]] = fir.emboxproc %[[VAL_7]], %[[VAL_5]] : ((!fir.ref<i32>, !fir.ref<tuple<!fir.ref<i32>>>) -> (), !fir.ref<tuple<!fir.ref<i32>>>) -> !fir.boxproc<() -> ()>
+! CHECK: fir.call @_QPtest_proc_dummy_other(%[[VAL_8]]) : (!fir.boxproc<() -> ()>) -> ()
+
+! CHECK-LABEL: func @_QFtest_proc_dummyPtest_proc_dummy_a(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32> {fir.bindc_name = "j"},
+! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<tuple<!fir.ref<i32>>> {fir.host_assoc}) {
+! CHECK: %[[VAL_2:.*]] = arith.constant 0 : i32
+! CHECK: %[[VAL_3:.*]] = fir.coordinate_of %[[VAL_1]], %[[VAL_2]] : (!fir.ref<tuple<!fir.ref<i32>>>, i32) -> !fir.llvm_ptr<!fir.ref<i32>>
+! CHECK: %[[VAL_4:.*]] = fir.load %[[VAL_3]] : !fir.llvm_ptr<!fir.ref<i32>>
+! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_4]] : !fir.ref<i32>
+! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
+! CHECK: %[[VAL_7:.*]] = arith.addi %[[VAL_5]], %[[VAL_6]] : i32
+! CHECK: fir.store %[[VAL_7]] to %[[VAL_4]] : !fir.ref<i32>
+! CHECK: return
+! CHECK: }
+
+! CHECK-LABEL: func @_QPtest_proc_dummy_other(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.boxproc<() -> ()>) {
+! CHECK: %[[VAL_1:.*]] = arith.constant 4 : i32
+! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {adapt.valuebyref}
+! CHECK: fir.store %[[VAL_1]] to %[[VAL_2]] : !fir.ref<i32>
+! CHECK: %[[VAL_3:.*]] = fir.box_addr %[[VAL_0]] : (!fir.boxproc<() -> ()>) -> ((!fir.ref<i32>) -> ())
+! CHECK: fir.call %[[VAL_3]](%[[VAL_2]]) : (!fir.ref<i32>) -> ()
+! CHECK: return
+! CHECK: }
+
+subroutine test_proc_dummy
+ integer i
+ i = 1
+ call test_proc_dummy_other(test_proc_dummy_a)
+ print *, i
+contains
+ subroutine test_proc_dummy_a(j)
+ i = i + j
+ end subroutine test_proc_dummy_a
+end subroutine test_proc_dummy
+
+subroutine test_proc_dummy_other(proc)
+ call proc(4)
+end subroutine test_proc_dummy_other
+
+! CHECK-LABEL: func @_QPtest_proc_dummy_char() {
+! CHECK-DAG: %[[VAL_0:.*]] = arith.constant 10 : index
+! CHECK-DAG: %[[VAL_1:.*]] = arith.constant 0 : i32
+! CHECK-DAG: %[[VAL_2:.*]] = arith.constant 9 : index
+! CHECK-DAG: %[[VAL_3:.*]] = arith.constant false
+! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 1 : index
+! CHECK-DAG: %[[VAL_5:.*]] = arith.constant 32 : i8
+! CHECK-DAG: %[[VAL_6:.*]] = arith.constant -1 : i32
+! CHECK-DAG: %[[VAL_8:.*]] = arith.constant 10 : i64
+! CHECK-DAG: %[[VAL_9:.*]] = arith.constant 40 : index
+! CHECK-DAG: %[[VAL_10:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_11:.*]] = fir.alloca !fir.char<1,40> {bindc_name = ".result"}
+! CHECK: %[[VAL_12:.*]] = fir.alloca !fir.char<1,10> {bindc_name = "message", uniq_name = "_QFtest_proc_dummy_charEmessage"}
+! CHECK: %[[VAL_13:.*]] = fir.alloca tuple<!fir.boxchar<1>>
+! CHECK: %[[VAL_14:.*]] = fir.coordinate_of %[[VAL_13]], %[[VAL_1]] : (!fir.ref<tuple<!fir.boxchar<1>>>, i32) -> !fir.ref<!fir.boxchar<1>>
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_12]] : (!fir.ref<!fir.char<1,10>>) -> !fir.ref<!fir.char<1,?>>
+! CHECK: %[[VAL_16:.*]] = fir.emboxchar %[[VAL_15]], %[[VAL_0]] : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>
+! CHECK: fir.store %[[VAL_16]] to %[[VAL_14]] : !fir.ref<!fir.boxchar<1>>
+! CHECK: %[[VAL_17:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref<!fir.char<1,9>>
+! CHECK: %[[VAL_18:.*]] = fir.convert %[[VAL_2]] : (index) -> i64
+! CHECK: %[[VAL_19:.*]] = fir.convert %[[VAL_12]] : (!fir.ref<!fir.char<1,10>>) -> !fir.ref<i8>
+! CHECK: %[[VAL_20:.*]] = fir.convert %[[VAL_17]] : (!fir.ref<!fir.char<1,9>>) -> !fir.ref<i8>
+! CHECK: fir.call @llvm.memmove.p0i8.p0i8.i64(%[[VAL_19]], %[[VAL_20]], %[[VAL_18]], %[[VAL_3]]) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
+! CHECK: %[[VAL_21:.*]] = fir.undefined !fir.char<1>
+! CHECK: %[[VAL_22:.*]] = fir.insert_value %[[VAL_21]], %[[VAL_5]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1>
+! CHECK: br ^bb1(%[[VAL_2]], %[[VAL_4]] : index, index)
+! CHECK: ^bb1(%[[VAL_23:.*]]: index, %[[VAL_24:.*]]: index):
+! CHECK: %[[VAL_25:.*]] = arith.cmpi sgt, %[[VAL_24]], %[[VAL_10]] : index
+! CHECK: cond_br %[[VAL_25]], ^bb2, ^bb3
+! CHECK: ^bb2:
+! CHECK: %[[VAL_26:.*]] = fir.convert %[[VAL_12]] : (!fir.ref<!fir.char<1,10>>) -> !fir.ref<!fir.array<10x!fir.char<1>>>
+! CHECK: %[[VAL_27:.*]] = fir.coordinate_of %[[VAL_26]], %[[VAL_23]] : (!fir.ref<!fir.array<10x!fir.char<1>>>, index) -> !fir.ref<!fir.char<1>>
+! CHECK: fir.store %[[VAL_22]] to %[[VAL_27]] : !fir.ref<!fir.char<1>>
+! CHECK: %[[VAL_28:.*]] = arith.addi %[[VAL_23]], %[[VAL_4]] : index
+! CHECK: %[[VAL_29:.*]] = arith.subi %[[VAL_24]], %[[VAL_4]] : index
+! CHECK: br ^bb1(%[[VAL_28]], %[[VAL_29]] : index, index)
+! CHECK: ^bb3:
+! CHECK: %[[VAL_30:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref<!fir.char<1,
+! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_30]] : (!fir.ref<!fir.char<1,{{.*}}>>) -> !fir.ref<i8>
+! CHECK: %[[VAL_32:.*]] = fir.call @_FortranAioBeginExternalListOutput(%[[VAL_6]], %[[VAL_31]], %{{.*}}) : (i32, !fir.ref<i8>, i32) -> !fir.ref<i8>
+! CHECK: %[[VAL_33:.*]] = fir.address_of(@_QFtest_proc_dummy_charPgen_message) : (!fir.ref<!fir.char<1,10>>, index, !fir.ref<tuple<!fir.boxchar<1>>>) -> !fir.boxchar<1>
+! CHECK: %[[VAL_34:.*]] = fir.emboxproc %[[VAL_33]], %[[VAL_13]] : ((!fir.ref<!fir.char<1,10>>, index, !fir.ref<tuple<!fir.boxchar<1>>>) -> !fir.boxchar<1>, !fir.ref<tuple<!fir.boxchar<1>>>) -> !fir.boxproc<() -> ()>
+! CHECK: %[[VAL_35:.*]] = fir.undefined tuple<!fir.boxproc<() -> ()>, i64>
+! CHECK: %[[VAL_36:.*]] = fir.insert_value %[[VAL_35]], %[[VAL_34]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, !fir.boxproc<() -> ()>) -> tuple<!fir.boxproc<() -> ()>, i64>
+! CHECK: %[[VAL_37:.*]] = fir.insert_value %[[VAL_36]], %[[VAL_8]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>, i64) -> tuple<!fir.boxproc<() -> ()>, i64>
+! CHECK: %[[VAL_38:.*]] = fir.call @llvm.stacksave() : () -> !fir.ref<i8>
+! CHECK: %[[VAL_39:.*]] = fir.call @_QPget_message(%[[VAL_11]], %[[VAL_9]], %[[VAL_37]]) : (!fir.ref<!fir.char<1,40>>, index, tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxchar<1>
+! CHECK: %[[VAL_40:.*]] = fir.convert %[[VAL_11]] : (!fir.ref<!fir.char<1,40>>) -> !fir.ref<i8>
+! CHECK: %[[VAL_41:.*]] = fir.convert %[[VAL_9]] : (index) -> i64
+! CHECK: %[[VAL_42:.*]] = fir.call @_FortranAioOutputAscii(%[[VAL_32]], %[[VAL_40]], %[[VAL_41]]) : (!fir.ref<i8>, !fir.ref<i8>, i64) -> i1
+! CHECK: fir.call @llvm.stackrestore(%[[VAL_38]]) : (!fir.ref<i8>) -> ()
+! CHECK: %[[VAL_43:.*]] = fir.call @_FortranAioEndIoStatement(%[[VAL_32]]) : (!fir.ref<i8>) -> i32
+! CHECK: return
+! CHECK: }
+
+! CHECK-LABEL: func @_QFtest_proc_dummy_charPgen_message(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.char<1,10>>,
+! CHECK-SAME: %[[VAL_1:.*]]: index,
+! CHECK-SAME: %[[VAL_2:.*]]: !fir.ref<tuple<!fir.boxchar<1>>> {fir.host_assoc}) -> !fir.boxchar<1> {
+! CHECK-DAG: %[[VAL_3:.*]] = arith.constant 0 : i32
+! CHECK-DAG: %[[VAL_4:.*]] = arith.constant 10 : index
+! CHECK-DAG: %[[VAL_5:.*]] = arith.constant false
+! CHECK-DAG: %[[VAL_6:.*]] = arith.constant 1 : index
+! CHECK-DAG: %[[VAL_7:.*]] = arith.constant 32 : i8
+! CHECK-DAG: %[[VAL_8:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_9:.*]] = fir.coordinate_of %[[VAL_2]], %[[VAL_3]] : (!fir.ref<tuple<!fir.boxchar<1>>>, i32) -> !fir.ref<!fir.boxchar<1>>
+! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_9]] : !fir.ref<!fir.boxchar<1>>
+! CHECK: %[[VAL_11:.*]]:2 = fir.unboxchar %[[VAL_10]] : (!fir.boxchar<1>) -> (!fir.ref<!fir.char<1,?>>, index)
+! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<!fir.char<1,10>>) -> !fir.ref<!fir.char<1,?>>
+! CHECK: %[[VAL_13:.*]] = arith.cmpi slt, %[[VAL_4]], %[[VAL_11]]#1 : index
+! CHECK: %[[VAL_14:.*]] = arith.select %[[VAL_13]], %[[VAL_4]], %[[VAL_11]]#1 : index
+! CHECK: %[[VAL_15:.*]] = fir.convert %[[VAL_14]] : (index) -> i64
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_12]] : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<i8>
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_11]]#0 : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<i8>
+! CHECK: fir.call @llvm.memmove.p0i8.p0i8.i64(%[[VAL_16]], %[[VAL_17]], %[[VAL_15]], %[[VAL_5]]) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
+! CHECK: %[[VAL_18:.*]] = fir.undefined !fir.char<1>
+! CHECK: %[[VAL_19:.*]] = fir.insert_value %[[VAL_18]], %[[VAL_7]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1>
+! CHECK: %[[VAL_20:.*]] = arith.subi %[[VAL_4]], %[[VAL_14]] : index
+! CHECK: br ^bb1(%[[VAL_14]], %[[VAL_20]] : index, index)
+! CHECK: ^bb1(%[[VAL_21:.*]]: index, %[[VAL_22:.*]]: index):
+! CHECK: %[[VAL_23:.*]] = arith.cmpi sgt, %[[VAL_22]], %[[VAL_8]] : index
+! CHECK: cond_br %[[VAL_23]], ^bb2, ^bb3
+! CHECK: ^bb2:
+! CHECK: %[[VAL_24:.*]] = fir.convert %[[VAL_12]] : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<!fir.array<?x!fir.char<1>>>
+! CHECK: %[[VAL_25:.*]] = fir.coordinate_of %[[VAL_24]], %[[VAL_21]] : (!fir.ref<!fir.array<?x!fir.char<1>>>, index) -> !fir.ref<!fir.char<1>>
+! CHECK: fir.store %[[VAL_19]] to %[[VAL_25]] : !fir.ref<!fir.char<1>>
+! CHECK: %[[VAL_26:.*]] = arith.addi %[[VAL_21]], %[[VAL_6]] : index
+! CHECK: %[[VAL_27:.*]] = arith.subi %[[VAL_22]], %[[VAL_6]] : index
+! CHECK: br ^bb1(%[[VAL_26]], %[[VAL_27]] : index, index)
+! CHECK: ^bb3:
+! CHECK: %[[VAL_28:.*]] = fir.emboxchar %[[VAL_12]], %[[VAL_4]] : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>
+! CHECK: return %[[VAL_28]] : !fir.boxchar<1>
+! CHECK: }
+
+! CHECK-LABEL: func @_QPget_message(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.char<1,40>>,
+! CHECK-SAME: %[[VAL_1:.*]]: index,
+! CHECK-SAME: %[[VAL_2:.*]]: tuple<!fir.boxproc<() -> ()>, i64> {fir.char_proc}) -> !fir.boxchar<1> {
+! CHECK: %[[VAL_3:.*]] = arith.constant 40 : index
+! CHECK: %[[VAL_4:.*]] = arith.constant 12 : index
+! CHECK: %[[VAL_5:.*]] = arith.constant false
+! CHECK: %[[VAL_6:.*]] = arith.constant 1 : index
+! CHECK: %[[VAL_7:.*]] = arith.constant 32 : i8
+! CHECK: %[[VAL_8:.*]] = arith.constant 0 : index
+! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_0]] : (!fir.ref<!fir.char<1,40>>) -> !fir.ref<!fir.char<1,?>>
+! CHECK: %[[VAL_10:.*]] = fir.address_of(@_QQcl.{{.*}}) : !fir.ref<!fir.char<1,12>>
+! CHECK: %[[VAL_11:.*]] = fir.extract_value %[[VAL_2]], [0 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> !fir.boxproc<() -> ()>
+! CHECK: %[[VAL_12:.*]] = fir.box_addr %[[VAL_11]] : (!fir.boxproc<() -> ()>) -> (() -> ())
+! CHECK: %[[VAL_13:.*]] = fir.extract_value %[[VAL_2]], [1 : index] : (tuple<!fir.boxproc<() -> ()>, i64>) -> i64
+! CHECK: %[[VAL_14:.*]] = fir.call @llvm.stacksave() : () -> !fir.ref<i8>
+! CHECK: %[[VAL_15:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_13]] : i64) {bindc_name = ".result"}
+! CHECK: %[[VAL_16:.*]] = fir.convert %[[VAL_12]] : (() -> ()) -> ((!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>)
+! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_13]] : (i64) -> index
+! CHECK: %[[VAL_18:.*]] = fir.call %[[VAL_16]](%[[VAL_15]], %[[VAL_17]]) : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>
+! CHECK: %[[VAL_19:.*]] = arith.addi %[[VAL_17]], %[[VAL_4]] : index
+! CHECK: %[[VAL_20:.*]] = fir.alloca !fir.char<1,?>(%[[VAL_19]] : index) {bindc_name = ".chrtmp"}
+! CHECK: %[[VAL_21:.*]] = fir.convert %[[VAL_4]] : (index) -> i64
+! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_20]] : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<i8>
+! CHECK: %[[VAL_23:.*]] = fir.convert %[[VAL_10]] : (!fir.ref<!fir.char<1,12>>) -> !fir.ref<i8>
+! CHECK: fir.call @llvm.memmove.p0i8.p0i8.i64(%[[VAL_22]], %[[VAL_23]], %[[VAL_21]], %[[VAL_5]]) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
+! CHECK: br ^bb1(%[[VAL_4]], %[[VAL_17]] : index, index)
+! CHECK: ^bb1(%[[VAL_24:.*]]: index, %[[VAL_25:.*]]: index):
+! CHECK: %[[VAL_26:.*]] = arith.cmpi sgt, %[[VAL_25]], %[[VAL_8]] : index
+! CHECK: cond_br %[[VAL_26]], ^bb2, ^bb3
+! CHECK: ^bb2:
+! CHECK: %[[VAL_27:.*]] = arith.subi %[[VAL_24]], %[[VAL_4]] : index
+! CHECK: %[[VAL_28:.*]] = fir.convert %[[VAL_15]] : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<!fir.array<?x!fir.char<1>>>
+! CHECK: %[[VAL_29:.*]] = fir.coordinate_of %[[VAL_28]], %[[VAL_27]] : (!fir.ref<!fir.array<?x!fir.char<1>>>, index) -> !fir.ref<!fir.char<1>>
+! CHECK: %[[VAL_30:.*]] = fir.load %[[VAL_29]] : !fir.ref<!fir.char<1>>
+! CHECK: %[[VAL_31:.*]] = fir.convert %[[VAL_20]] : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<!fir.array<?x!fir.char<1>>>
+! CHECK: %[[VAL_32:.*]] = fir.coordinate_of %[[VAL_31]], %[[VAL_24]] : (!fir.ref<!fir.array<?x!fir.char<1>>>, index) -> !fir.ref<!fir.char<1>>
+! CHECK: fir.store %[[VAL_30]] to %[[VAL_32]] : !fir.ref<!fir.char<1>>
+! CHECK: %[[VAL_33:.*]] = arith.addi %[[VAL_24]], %[[VAL_6]] : index
+! CHECK: %[[VAL_34:.*]] = arith.subi %[[VAL_25]], %[[VAL_6]] : index
+! CHECK: br ^bb1(%[[VAL_33]], %[[VAL_34]] : index, index)
+! CHECK: ^bb3:
+! CHECK: %[[VAL_35:.*]] = arith.cmpi slt, %[[VAL_3]], %[[VAL_19]] : index
+! CHECK: %[[VAL_36:.*]] = arith.select %[[VAL_35]], %[[VAL_3]], %[[VAL_19]] : index
+! CHECK: %[[VAL_37:.*]] = fir.convert %[[VAL_36]] : (index) -> i64
+! CHECK: %[[VAL_38:.*]] = fir.convert %[[VAL_9]] : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<i8>
+! CHECK: fir.call @llvm.memmove.p0i8.p0i8.i64(%[[VAL_38]], %[[VAL_22]], %[[VAL_37]], %[[VAL_5]]) : (!fir.ref<i8>, !fir.ref<i8>, i64, i1) -> ()
+! CHECK: %[[VAL_39:.*]] = fir.undefined !fir.char<1>
+! CHECK: %[[VAL_40:.*]] = fir.insert_value %[[VAL_39]], %[[VAL_7]], [0 : index] : (!fir.char<1>, i8) -> !fir.char<1>
+! CHECK: %[[VAL_41:.*]] = arith.subi %[[VAL_3]], %[[VAL_36]] : index
+! CHECK: br ^bb4(%[[VAL_36]], %[[VAL_41]] : index, index)
+! CHECK: ^bb4(%[[VAL_42:.*]]: index, %[[VAL_43:.*]]: index):
+! CHECK: %[[VAL_44:.*]] = arith.cmpi sgt, %[[VAL_43]], %[[VAL_8]] : index
+! CHECK: cond_br %[[VAL_44]], ^bb5, ^bb6
+! CHECK: ^bb5:
+! CHECK: %[[VAL_45:.*]] = fir.convert %[[VAL_9]] : (!fir.ref<!fir.char<1,?>>) -> !fir.ref<!fir.array<?x!fir.char<1>>>
+! CHECK: %[[VAL_46:.*]] = fir.coordinate_of %[[VAL_45]], %[[VAL_42]] : (!fir.ref<!fir.array<?x!fir.char<1>>>, index) -> !fir.ref<!fir.char<1>>
+! CHECK: fir.store %[[VAL_40]] to %[[VAL_46]] : !fir.ref<!fir.char<1>>
+! CHECK: %[[VAL_47:.*]] = arith.addi %[[VAL_42]], %[[VAL_6]] : index
+! CHECK: %[[VAL_48:.*]] = arith.subi %[[VAL_43]], %[[VAL_6]] : index
+! CHECK: br ^bb4(%[[VAL_47]], %[[VAL_48]] : index, index)
+! CHECK: ^bb6:
+! CHECK: fir.call @llvm.stackrestore(%[[VAL_14]]) : (!fir.ref<i8>) -> ()
+! CHECK: %[[VAL_49:.*]] = fir.emboxchar %[[VAL_9]], %[[VAL_3]] : (!fir.ref<!fir.char<1,?>>, index) -> !fir.boxchar<1>
+! CHECK: return %[[VAL_49]] : !fir.boxchar<1>
+! CHECK: }
+
+subroutine test_proc_dummy_char
+ character(40) get_message
+ external get_message
+ character(10) message
+ message = "Hi there!"
+ print *, get_message(gen_message)
+contains
+ function gen_message
+ character(10) :: gen_message
+ gen_message = message
+ end function gen_message
+end subroutine test_proc_dummy_char
+
+function get_message(a)
+ character(40) :: get_message
+ character(*) :: a
+ get_message = "message is: " // a()
+end function get_message
+
+! CHECK-LABEL: func @_QPtest_11a() {
+! CHECK: %[[a:.*]] = fir.address_of(@_QPtest_11b) : () -> ()
+! CHECK: %[[b:.*]] = fir.emboxproc %[[a]] : (() -> ()) -> !fir.boxproc<() -> ()>
+! CHECK: fir.call @_QPtest_11c(%[[b]], %{{.*}}) : (!fir.boxproc<() -> ()>, !fir.ref<i32>) -> ()
+! CHECK: func private @_QPtest_11c(!fir.boxproc<() -> ()>, !fir.ref<i32>)
+
+subroutine test_11a
+ external test_11b
+ call test_11c(test_11b, 3)
+end subroutine test_11a
! since definition should be processed first regardless.
! pass, call, define
+! CHECK-LABEL: func @_QPpass_foo() {
+subroutine pass_foo()
+ external :: foo
+ ! CHECK: %[[f:.*]] = fir.address_of(@_QPfoo)
+ ! CHECK: fir.emboxproc %[[f]] : ((!fir.ref<!fir.array<2x5xi32>>) -> ()) -> !fir.boxproc<() -> ()>
+ call bar(foo)
+end subroutine
! CHECK-LABEL: func @_QPcall_foo(
! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<10xi32>>{{.*}}) {
subroutine call_foo(i)
! fir.call @_QPfoo2(%[[argconvert]]) : (!fir.ref<!fir.array<2x5xi32>>) -> ()
call foo2(i)
end subroutine
+! CHECK-LABEL: func @_QPpass_foo2() {
+subroutine pass_foo2()
+ external :: foo2
+ ! CHECK: %[[f:.*]] = fir.address_of(@_QPfoo2)
+ ! CHECK: fir.emboxproc %[[f]] : ((!fir.ref<!fir.array<2x5xi32>>) -> ()) -> !fir.boxproc<() -> ()>
+ call bar(foo2)
+end subroutine
! CHECK-LABEL: func @_QPfoo2(
! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<2x5xi32>>{{.*}}) {
subroutine foo2(i)
integer :: i(2, 5)
call do_something(i)
end subroutine
+! CHECK-LABEL: func @_QPpass_foo3() {
+subroutine pass_foo3()
+ external :: foo3
+ ! CHECK: %[[f:.*]] = fir.address_of(@_QPfoo3)
+ ! CHECK: fir.emboxproc %[[f]] : ((!fir.ref<!fir.array<2x5xi32>>) -> ()) -> !fir.boxproc<() -> ()>
+ call bar(foo3)
+end subroutine
! define, call, pass
! CHECK-LABEL: func @_QPfoo4(
! fir.call @_QPfoo4(%[[argconvert]]) : (!fir.ref<!fir.array<2x5xi32>>) -> ()
call foo4(i)
end subroutine
+! CHECK-LABEL: func @_QPpass_foo4() {
+subroutine pass_foo4()
+ external :: foo4
+ ! CHECK: %[[f:.*]] = fir.address_of(@_QPfoo4)
+ ! CHECK: fir.emboxproc %[[f]] : ((!fir.ref<!fir.array<2x5xi32>>) -> ()) -> !fir.boxproc<() -> ()>
+ call bar(foo4)
+end subroutine
! define, pass, call
! CHECK-LABEL: func @_QPfoo5(
integer :: i(2, 5)
call do_something(i)
end subroutine
+! CHECK-LABEL: func @_QPpass_foo5() {
+subroutine pass_foo5()
+ external :: foo5
+ ! CHECK: %[[f:.*]] = fir.address_of(@_QPfoo5)
+ ! CHECK: fir.emboxproc %[[f]] : ((!fir.ref<!fir.array<2x5xi32>>) -> ()) -> !fir.boxproc<() -> ()>
+ call bar(foo5)
+end subroutine
! CHECK-LABEL: func @_QPcall_foo5(
! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<10xi32>>{{.*}}) {
subroutine call_foo5(i)
integer :: i(10)
! CHECK-NOT: convert
call foo6(i)
+end subroutine
+! CHECK-LABEL: func @_QPpass_foo6() {
+subroutine pass_foo6()
+ external :: foo6
+ ! CHECK: %[[f:.*]] = fir.address_of(@_QPfoo6) : (!fir.ref<!fir.array<10xi32>>) -> ()
+ ! CHECK: fir.emboxproc %[[f]] : ((!fir.ref<!fir.array<10xi32>>) -> ()) -> !fir.boxproc<() -> ()>
+ call bar(foo6)
end subroutine
+! pass, call
+! CHECK-LABEL: func @_QPpass_foo7() {
+subroutine pass_foo7()
+ external :: foo7
+ ! CHECK-NOT: convert
+ call bar(foo7)
+end subroutine
+! CHECK-LABEL: func @_QPcall_foo7(
+! CHECK-SAME: %{{.*}}: !fir.ref<!fir.array<10xi32>>{{.*}}) -> f32 {
+function call_foo7(i)
+ integer :: i(10)
+ ! CHECK: %[[f:.*]] = fir.address_of(@_QPfoo7) : () -> ()
+ ! CHECK: %[[funccast:.*]] = fir.convert %[[f]] : (() -> ()) -> ((!fir.ref<!fir.array<10xi32>>) -> f32)
+ ! CHECK: fir.call %[[funccast]](%arg0) : (!fir.ref<!fir.array<10xi32>>) -> f32
+ call_foo7 = foo7(i)
+end function
+
! call, call with different type
! CHECK-LABEL: func @_QPcall_foo8(
end subroutine
! CHECK: func private @_QPfoo6(!fir.ref<!fir.array<10xi32>>)
+! CHECK: func private @_QPfoo7()
! Test declaration from test_target_in_iface
! CHECK-LABEL: func private @_QPtest_target(!fir.ref<i32> {fir.target}, !fir.box<!fir.array<?xf32>> {fir.target})