//===----------------------------------------------------------------------===//
#include "flang/Lower/IntrinsicCall.h"
+#include "flang/Common/static-multimap-view.h"
#include "flang/Lower/SymbolMap.h"
#include "flang/Lower/Todo.h"
+#include "flang/Optimizer/Builder/Complex.h"
#include "flang/Optimizer/Builder/FIRBuilder.h"
+#include "flang/Optimizer/Builder/Runtime/RTBuilder.h"
#include "flang/Optimizer/Support/FatalError.h"
+#include "llvm/Support/CommandLine.h"
#define DEBUG_TYPE "flang-lower-intrinsic"
+#define PGMATH_DECLARE
+#include "flang/Evaluate/pgmath.h.inc"
+
/// This file implements lowering of Fortran intrinsic procedures.
/// Intrinsics are lowered to a mix of FIR and MLIR operations as
/// well as call to runtime functions or LLVM intrinsics.
llvm::Optional<mlir::Type> resultType,
llvm::ArrayRef<fir::ExtendedValue> arg);
- mlir::Value genIand(mlir::Type, llvm::ArrayRef<mlir::Value>);
+ /// Search for a runtime function that is associated with the generic
+ /// intrinsic name and whose signature matches the intrinsic argument and
+ /// result types. If no such runtime function is found but a runtime function
+ /// associated with the Fortran generic exists and has the same number of
+ /// arguments, conversions will be inserted before and/or after the call.
+ /// This is mainly meant to allow 16-bit float support even though little or
+ /// no math runtime is currently available for it.
+ mlir::Value genRuntimeCall(llvm::StringRef name, mlir::Type,
+ llvm::ArrayRef<mlir::Value>);
+
+ using RuntimeCallGenerator = std::function<mlir::Value(
+ fir::FirOpBuilder &, mlir::Location, llvm::ArrayRef<mlir::Value>)>;
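+ /// Get a RuntimeCallGenerator that builds a call to the runtime function
+ /// best matching \p name and \p soughtFuncType, inserting argument and
+ /// result conversions when the match is not exact.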
+ RuntimeCallGenerator
+ getRuntimeCallGenerator(llvm::StringRef name,
+ mlir::FunctionType soughtFuncType);
+ mlir::Value genAbs(mlir::Type, llvm::ArrayRef<mlir::Value>);
+ mlir::Value genIand(mlir::Type, llvm::ArrayRef<mlir::Value>);
/// Define the different FIR generators that can be mapped to intrinsic to
/// generate the related code.
- using ElementalGenerator = decltype(&IntrinsicLibrary::genIand);
+ using ElementalGenerator = decltype(&IntrinsicLibrary::genAbs);
using Generator = std::variant<ElementalGenerator>;
/// Generate calls to ElementalGenerator, handling the elemental aspects
mlir::Value invokeGenerator(ElementalGenerator generator,
mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args);
+ mlir::Value invokeGenerator(RuntimeCallGenerator generator,
+ mlir::Type resultType,
+ llvm::ArrayRef<mlir::Value> args);
fir::FirOpBuilder &builder;
mlir::Location loc;
};
/// argument must not be lowered by value. In which case, the lowering rules
/// should be provided for all the intrinsic arguments for completeness.
static constexpr IntrinsicHandler handlers[]{
+ {"abs", &I::genAbs},
{"iand", &I::genIand},
};
}
//===----------------------------------------------------------------------===//
+// Math runtime description and matching utility
+//===----------------------------------------------------------------------===//
+
+struct RuntimeFunction {
+ // llvm::StringRef comparison operators are not constexpr, so use string_view.
+ using Key = std::string_view;
+ // Needed for implicit compare with keys.
+ constexpr operator Key() const { return key; }
+ Key key; // intrinsic name
+ llvm::StringRef symbol;
+ fir::runtime::FuncTypeBuilderFunc typeGenerator;
+};
+
+#define RUNTIME_STATIC_DESCRIPTION(name, func) \
+ {#name, #func, fir::runtime::RuntimeTableKey<decltype(func)>::getTypeModel()},
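+// Note: the tables built from these entries must stay sorted by key; they are
+// wrapped in StaticMultimapView instances below whose Verify() is
+// static_asserted.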
+static constexpr RuntimeFunction pgmathFast[] = {
+#define PGMATH_FAST
+#define PGMATH_USE_ALL_TYPES(name, func) RUNTIME_STATIC_DESCRIPTION(name, func)
+#include "flang/Evaluate/pgmath.h.inc"
+};
+
+static mlir::FunctionType genF32F32FuncType(mlir::MLIRContext *context) {
+ auto t = mlir::FloatType::getF32(context);
+ return mlir::FunctionType::get(context, {t}, {t});
+}
+
+static mlir::FunctionType genF64F64FuncType(mlir::MLIRContext *context) {
+ auto t = mlir::FloatType::getF64(context);
+ return mlir::FunctionType::get(context, {t}, {t});
+}
+
+// TODO: Fill this table with more intrinsics.
+// Note: these are also defined as operations in the LLVM dialect. See if they
+// can be used instead and whether that has advantages.
+static constexpr RuntimeFunction llvmIntrinsics[] = {
+ {"abs", "llvm.fabs.f32", genF32F32FuncType},
+ {"abs", "llvm.fabs.f64", genF64F64FuncType},
+};
+
+// This helper class computes a "distance" between two function types.
+// The distance measures how many narrowing conversions of actual arguments
+// and result of "from" must be made in order to use "to" instead of "from".
+// For instance, the distance between ACOS(REAL(10)) and ACOS(REAL(8)) is
+// greater than the one between ACOS(REAL(10)) and ACOS(REAL(16)). This means
+// if no implementation of ACOS(REAL(10)) is available, it is better to use
+// ACOS(REAL(16)) with casts rather than ACOS(REAL(8)).
+// Note that this is not a symmetric distance and the order of "from" and "to"
+// arguments matters: d(foo, bar) may not be the same as d(bar, foo), because
+// it may be safe to replace foo by bar, but not the opposite.
+class FunctionDistance {
+public:
+ FunctionDistance() : infinite{true} {}
+
+ FunctionDistance(mlir::FunctionType from, mlir::FunctionType to) {
+ unsigned nInputs = from.getNumInputs();
+ unsigned nResults = from.getNumResults();
+ if (nResults != to.getNumResults() || nInputs != to.getNumInputs()) {
+ infinite = true;
+ } else {
+ for (decltype(nInputs) i{0}; i < nInputs && !infinite; ++i)
+ addArgumentDistance(from.getInput(i), to.getInput(i));
+ for (decltype(nResults) i{0}; i < nResults && !infinite; ++i)
+ addResultDistance(to.getResult(i), from.getResult(i));
+ }
+ }
+
+ /// Beware both d1.isSmallerThan(d2) *and* d2.isSmallerThan(d1) may be
+ /// false if both d1 and d2 are infinite. This implies that
+ /// d1.isSmallerThan(d2) is not equivalent to !d2.isSmallerThan(d1)
+ bool isSmallerThan(const FunctionDistance &d) const {
+ return !infinite &&
+ (d.infinite || std::lexicographical_compare(
+ conversions.begin(), conversions.end(),
+ d.conversions.begin(), d.conversions.end()));
+ }
+
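+ /// True when using the candidate would narrow at least one argument or
+ /// require extending the result, both of which may lose or fabricate
+ /// precision.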
+ bool isLosingPrecision() const {
+ return conversions[narrowingArg] != 0 || conversions[extendingResult] != 0;
+ }
+
+ bool isInfinite() const { return infinite; }
+
+private:
+ enum class Conversion { Forbidden, None, Narrow, Extend };
+
+ void addArgumentDistance(mlir::Type from, mlir::Type to) {
+ switch (conversionBetweenTypes(from, to)) {
+ case Conversion::Forbidden:
+ infinite = true;
+ break;
+ case Conversion::None:
+ break;
+ case Conversion::Narrow:
+ conversions[narrowingArg]++;
+ break;
+ case Conversion::Extend:
+ conversions[nonNarrowingArg]++;
+ break;
+ }
+ }
+
+ void addResultDistance(mlir::Type from, mlir::Type to) {
+ switch (conversionBetweenTypes(from, to)) {
+ case Conversion::Forbidden:
+ infinite = true;
+ break;
+ case Conversion::None:
+ break;
+ case Conversion::Narrow:
+ conversions[nonExtendingResult]++;
+ break;
+ case Conversion::Extend:
+ conversions[extendingResult]++;
+ break;
+ }
+ }
+
+ // The floating-point width can come from mlir::FloatType, fir::RealType, or
+ // fir::ComplexType.
+ static unsigned getFloatingPointWidth(mlir::Type t) {
+ if (auto f{t.dyn_cast<mlir::FloatType>()})
+ return f.getWidth();
+ // FIXME: Get width another way for fir.real/complex
+ // - use fir/KindMapping.h and llvm::Type
+ // - or use evaluate/type.h
+ if (auto r{t.dyn_cast<fir::RealType>()})
+ return r.getFKind() * 4;
+ if (auto cplx{t.dyn_cast<fir::ComplexType>()})
+ return cplx.getFKind() * 4;
+ llvm_unreachable("not a floating-point type");
+ }
+
+ static Conversion conversionBetweenTypes(mlir::Type from, mlir::Type to) {
+ if (from == to) {
+ return Conversion::None;
+ }
+ if (auto fromIntTy{from.dyn_cast<mlir::IntegerType>()}) {
+ if (auto toIntTy{to.dyn_cast<mlir::IntegerType>()}) {
+ return fromIntTy.getWidth() > toIntTy.getWidth() ? Conversion::Narrow
+ : Conversion::Extend;
+ }
+ }
+ if (fir::isa_real(from) && fir::isa_real(to)) {
+ return getFloatingPointWidth(from) > getFloatingPointWidth(to)
+ ? Conversion::Narrow
+ : Conversion::Extend;
+ }
+ if (auto fromCplxTy{from.dyn_cast<fir::ComplexType>()}) {
+ if (auto toCplxTy{to.dyn_cast<fir::ComplexType>()}) {
+ return getFloatingPointWidth(fromCplxTy) >
+ getFloatingPointWidth(toCplxTy)
+ ? Conversion::Narrow
+ : Conversion::Extend;
+ }
+ }
+ // Notes:
+ // - There is no conversion between character types; specialized runtime
+ // functions should be provided instead.
+ // - It is not clear there is a use case for automatic conversions
+ // around Logical, and it may damage hidden information in the physical
+ // storage, so do not do it.
+ return Conversion::Forbidden;
+ }
+
+ // Below are the indexes used to access data in the conversions array.
+ // The order matters for lexicographical_compare: earlier entries are
+ // more significant.
+ enum {
+ narrowingArg = 0, // usually bad
+ extendingResult, // usually bad
+ nonExtendingResult, // usually ok
+ nonNarrowingArg, // usually ok
+ dataSize
+ };
+
+ std::array<int, dataSize> conversions{/* zero init*/};
+ bool infinite{false}; // When forbidden conversion or wrong argument number
+};
+
+/// Build mlir::FuncOp from runtime symbol description and add
+/// fir.runtime attribute.
+static mlir::FuncOp getFuncOp(mlir::Location loc, fir::FirOpBuilder &builder,
+ const RuntimeFunction &runtime) {
+ mlir::FuncOp function = builder.addNamedFunction(
+ loc, runtime.symbol, runtime.typeGenerator(builder.getContext()));
+ function->setAttr("fir.runtime", builder.getUnitAttr());
+ return function;
+}
+
+/// Search \p lib for an exact match of the sought intrinsic function type.
+/// If there is no exact match, the best near match so far (the one with the
+/// smallest distance) is reported through \p bestNearMatch and
+/// \p bestMatchDistance, and the returned mlir::FuncOp contains a nullptr.
+mlir::FuncOp searchFunctionInLibrary(
+ mlir::Location loc, fir::FirOpBuilder &builder,
+ const Fortran::common::StaticMultimapView<RuntimeFunction> &lib,
+ llvm::StringRef name, mlir::FunctionType funcType,
+ const RuntimeFunction **bestNearMatch,
+ FunctionDistance &bestMatchDistance) {
+ auto range = lib.equal_range(name);
+ for (auto iter{range.first}; iter != range.second && iter; ++iter) {
+ const auto &impl = *iter;
+ auto implType = impl.typeGenerator(builder.getContext());
+ if (funcType == implType) {
+ return getFuncOp(loc, builder, impl); // exact match
+ } else {
+ FunctionDistance distance(funcType, implType);
+ if (distance.isSmallerThan(bestMatchDistance)) {
+ *bestNearMatch = &impl;
+ bestMatchDistance = std::move(distance);
+ }
+ }
+ }
+ return {};
+}
+
+/// Search the runtime libraries for the best runtime function given an
+/// intrinsic name and interface. The interface may not be a perfect match, in
+/// which case the caller is responsible for inserting argument and return
+/// value conversions.
+/// If nothing is found, the mlir::FuncOp will contain a nullptr.
+static mlir::FuncOp getRuntimeFunction(mlir::Location loc,
+ fir::FirOpBuilder &builder,
+ llvm::StringRef name,
+ mlir::FunctionType funcType) {
+ const RuntimeFunction *bestNearMatch = nullptr;
+ FunctionDistance bestMatchDistance{};
+ mlir::FuncOp match;
+ using RtMap = Fortran::common::StaticMultimapView<RuntimeFunction>;
+ static constexpr RtMap pgmathF(pgmathFast);
+ static_assert(pgmathF.Verify() && "map must be sorted");
+ match = searchFunctionInLibrary(loc, builder, pgmathF, name, funcType,
+ &bestNearMatch, bestMatchDistance);
+ if (match)
+ return match;
+
+ // Go through the llvm intrinsics if there is no exact match in libpgmath or
+ // if mathRuntimeVersion == llvmOnly.
+ static constexpr RtMap llvmIntr(llvmIntrinsics);
+ static_assert(llvmIntr.Verify() && "map must be sorted");
+ if (mlir::FuncOp exactMatch =
+ searchFunctionInLibrary(loc, builder, llvmIntr, name, funcType,
+ &bestNearMatch, bestMatchDistance))
+ return exactMatch;
+
+ if (bestNearMatch != nullptr) {
+ if (bestMatchDistance.isLosingPrecision()) {
+ // Using this runtime version requires narrowing the arguments
+ // or extending the result. It is not numerically safe. There
+ // is currently no quad math library described in lowering that
+ // could be used here. Emit an error and continue generating
+ // the code with the narrowing cast so that the user can get a
+ // complete list of the problematic intrinsic calls.
+ std::string message("TODO: no math runtime available for '");
+ llvm::raw_string_ostream sstream(message);
+ if (name == "pow") {
+ assert(funcType.getNumInputs() == 2 &&
+ "power operator has two arguments");
+ sstream << funcType.getInput(0) << " ** " << funcType.getInput(1);
+ } else {
+ sstream << name << "(";
+ if (funcType.getNumInputs() > 0)
+ sstream << funcType.getInput(0);
+ for (mlir::Type argType : funcType.getInputs().drop_front())
+ sstream << ", " << argType;
+ sstream << ")";
+ }
+ sstream << "'";
+ mlir::emitError(loc, message);
+ }
+ return getFuncOp(loc, builder, *bestNearMatch);
+ }
+ return {};
+}
+
+/// Helper to get the function type from the argument and result types.
+static mlir::FunctionType getFunctionType(llvm::Optional<mlir::Type> resultType,
+ llvm::ArrayRef<mlir::Value> arguments,
+ fir::FirOpBuilder &builder) {
+ llvm::SmallVector<mlir::Type> argTypes;
+ for (mlir::Value arg : arguments)
+ argTypes.push_back(arg.getType());
+ llvm::SmallVector<mlir::Type> resTypes;
+ if (resultType)
+ resTypes.push_back(*resultType);
+ return mlir::FunctionType::get(builder.getModule().getContext(), argTypes,
+ resTypes);
+}
+//===----------------------------------------------------------------------===//
// IntrinsicLibrary
//===----------------------------------------------------------------------===//
llvm::ArrayRef<mlir::Value> args) {
return std::invoke(generator, *this, resultType, args);
}
+
+mlir::Value
+IntrinsicLibrary::invokeGenerator(RuntimeCallGenerator generator,
+ mlir::Type resultType,
+ llvm::ArrayRef<mlir::Value> args) {
+ return generator(builder, loc, args);
+}
+IntrinsicLibrary::RuntimeCallGenerator
+IntrinsicLibrary::getRuntimeCallGenerator(llvm::StringRef name,
+ mlir::FunctionType soughtFuncType) {
+ mlir::FuncOp funcOp = getRuntimeFunction(loc, builder, name, soughtFuncType);
+ if (!funcOp) {
+ mlir::emitError(loc,
+ "TODO: missing intrinsic lowering: " + llvm::Twine(name));
+ llvm::errs() << "requested type was: " << soughtFuncType << "\n";
+ exit(1);
+ }
+
+ mlir::FunctionType actualFuncType = funcOp.getType();
+ assert(actualFuncType.getNumResults() == soughtFuncType.getNumResults() &&
+ actualFuncType.getNumInputs() == soughtFuncType.getNumInputs() &&
+ actualFuncType.getNumResults() == 1 && "Bad intrinsic match");
+
+ return [funcOp, actualFuncType,
+ soughtFuncType](fir::FirOpBuilder &builder, mlir::Location loc,
+ llvm::ArrayRef<mlir::Value> args) {
+ llvm::SmallVector<mlir::Value> convertedArguments;
+ for (auto [fst, snd] : llvm::zip(actualFuncType.getInputs(), args))
+ convertedArguments.push_back(builder.createConvert(loc, fst, snd));
+ auto call = builder.create<fir::CallOp>(loc, funcOp, convertedArguments);
+ mlir::Type soughtType = soughtFuncType.getResult(0);
+ return builder.createConvert(loc, soughtType, call.getResult(0));
+ };
+}
//===----------------------------------------------------------------------===//
// Code generators for the intrinsic
//===----------------------------------------------------------------------===//
+mlir::Value IntrinsicLibrary::genRuntimeCall(llvm::StringRef name,
+ mlir::Type resultType,
+ llvm::ArrayRef<mlir::Value> args) {
+ mlir::FunctionType soughtFuncType =
+ getFunctionType(resultType, args, builder);
+ return getRuntimeCallGenerator(name, soughtFuncType)(builder, loc, args);
+}
+
+// ABS
+mlir::Value IntrinsicLibrary::genAbs(mlir::Type resultType,
+ llvm::ArrayRef<mlir::Value> args) {
+ assert(args.size() == 1);
+ mlir::Value arg = args[0];
+ mlir::Type type = arg.getType();
+ if (fir::isa_real(type)) {
+ // Runtime call to fp abs. An alternative would be to use mlir math::AbsFOp
+ // but it does not support all fir floating point types.
+ return genRuntimeCall("abs", resultType, args);
+ }
+ if (auto intType = type.dyn_cast<mlir::IntegerType>()) {
+ // At the time of this implementation there is no abs op in mlir.
+ // So, implement abs here without branching.
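+ // With mask = arg >> (bitwidth - 1) (arithmetic shift: all zeros for a
+ // non-negative value, all ones for a negative one), the identity
+ // abs(arg) == (arg ^ mask) - mask holds. For example, for arg = -5 on i32:
+ // mask = -1, arg ^ mask = 4, and 4 - (-1) = 5.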
+ mlir::Value shift =
+ builder.createIntegerConstant(loc, intType, intType.getWidth() - 1);
+ auto mask = builder.create<mlir::arith::ShRSIOp>(loc, arg, shift);
+ auto xored = builder.create<mlir::arith::XOrIOp>(loc, arg, mask);
+ return builder.create<mlir::arith::SubIOp>(loc, xored, mask);
+ }
+ if (fir::isa_complex(type)) {
+ // Use HYPOT to fulfill the no underflow/overflow requirement.
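+ // Computing sqrt(re*re + im*im) directly can overflow or underflow in the
+ // intermediate squares even when |z| itself is representable; hypot scales
+ // its operands to avoid this.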
+ auto parts = fir::factory::Complex{builder, loc}.extractParts(arg);
+ llvm::SmallVector<mlir::Value> args = {parts.first, parts.second};
+ return genRuntimeCall("hypot", resultType, args);
+ }
+ llvm_unreachable("unexpected type in ABS argument");
+}
+
// IAND
mlir::Value IntrinsicLibrary::genIand(mlir::Type resultType,
llvm::ArrayRef<mlir::Value> args) {
--- /dev/null
+! RUN: bbc -emit-fir %s -o - | FileCheck %s
+! RUN: %flang_fc1 -emit-fir %s -o - | FileCheck %s
+
+! Test abs intrinsic for various types (int, float, complex)
+
+! CHECK-LABEL: func @_QPabs_testi
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i32>{{.*}}, %[[VAL_1:.*]]: !fir.ref<i32>
+subroutine abs_testi(a, b)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i32>
+! CHECK: %[[VAL_3:.*]] = arith.constant 31 : i32
+! CHECK: %[[VAL_4:.*]] = arith.shrsi %[[VAL_2]], %[[VAL_3]] : i32
+! CHECK: %[[VAL_5:.*]] = arith.xori %[[VAL_2]], %[[VAL_4]] : i32
+! CHECK: %[[VAL_6:.*]] = arith.subi %[[VAL_5]], %[[VAL_4]] : i32
+! CHECK: fir.store %[[VAL_6]] to %[[VAL_1]] : !fir.ref<i32>
+! CHECK: return
+ integer :: a, b
+ b = abs(a)
+end subroutine
+
+! CHECK-LABEL: func @_QPabs_testi16
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<i128>{{.*}}, %[[VAL_1:.*]]: !fir.ref<i128>
+subroutine abs_testi16(a, b)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<i128>
+! CHECK: %[[VAL_3:.*]] = arith.constant 127 : i128
+! CHECK: %[[VAL_4:.*]] = arith.shrsi %[[VAL_2]], %[[VAL_3]] : i128
+! CHECK: %[[VAL_5:.*]] = arith.xori %[[VAL_2]], %[[VAL_4]] : i128
+! CHECK: %[[VAL_6:.*]] = arith.subi %[[VAL_5]], %[[VAL_4]] : i128
+! CHECK: fir.store %[[VAL_6]] to %[[VAL_1]] : !fir.ref<i128>
+! CHECK: return
+ integer(kind=16) :: a, b
+ b = abs(a)
+end subroutine
+
+! CHECK-LABEL: func @_QPabs_testh(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f16>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f16>{{.*}}) {
+subroutine abs_testh(a, b)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f16>
+! CHECK: %[[VAL_2_1:.*]] = fir.convert %[[VAL_2]] : (f16) -> f32
+! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f32(%[[VAL_2_1]]) : (f32) -> f32
+! CHECK: %[[VAL_3_1:.*]] = fir.convert %[[VAL_3]] : (f32) -> f16
+! CHECK: fir.store %[[VAL_3_1]] to %[[VAL_1]] : !fir.ref<f16>
+! CHECK: return
+ real(kind=2) :: a, b
+ b = abs(a)
+end subroutine
+
+! CHECK-LABEL: func @_QPabs_testb(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<bf16>{{.*}}, %[[VAL_1:.*]]: !fir.ref<bf16>{{.*}}) {
+subroutine abs_testb(a, b)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<bf16>
+! CHECK: %[[VAL_2_1:.*]] = fir.convert %[[VAL_2]] : (bf16) -> f32
+! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f32(%[[VAL_2_1]]) : (f32) -> f32
+! CHECK: %[[VAL_3_1:.*]] = fir.convert %[[VAL_3]] : (f32) -> bf16
+! CHECK: fir.store %[[VAL_3_1]] to %[[VAL_1]] : !fir.ref<bf16>
+! CHECK: return
+ real(kind=3) :: a, b
+ b = abs(a)
+end subroutine
+
+! CHECK-LABEL: func @_QPabs_testr(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f32>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f32>{{.*}}) {
+subroutine abs_testr(a, b)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f32>
+! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f32(%[[VAL_2]]) : (f32) -> f32
+! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<f32>
+! CHECK: return
+ real :: a, b
+ b = abs(a)
+end subroutine
+
+! CHECK-LABEL: func @_QPabs_testd(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f64>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f64>{{.*}}) {
+subroutine abs_testd(a, b)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f64>
+! CHECK: %[[VAL_3:.*]] = fir.call @llvm.fabs.f64(%[[VAL_2]]) : (f64) -> f64
+! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<f64>
+! CHECK: return
+ real(kind=8) :: a, b
+ b = abs(a)
+end subroutine
+
+! CHECK-LABEL: func @_QPabs_testzr(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.complex<4>>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f32>{{.*}}) {
+subroutine abs_testzr(a, b)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.complex<4>>
+! CHECK: %[[VAL_3:.*]] = fir.extract_value %[[VAL_2]], [0 : index] : (!fir.complex<4>) -> f32
+! CHECK: %[[VAL_4:.*]] = fir.extract_value %[[VAL_2]], [1 : index] : (!fir.complex<4>) -> f32
+! CHECK: %[[VAL_5:.*]] = fir.call @__mth_i_hypot(%[[VAL_3]], %[[VAL_4]]) : (f32, f32) -> f32
+! CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref<f32>
+! CHECK: return
+ complex :: a
+ real :: b
+ b = abs(a)
+end subroutine abs_testzr
+
+! CHECK-LABEL: func @_QPabs_testzd(
+! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<!fir.complex<8>>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f64>{{.*}}) {
+subroutine abs_testzd(a, b)
+! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<!fir.complex<8>>
+! CHECK: %[[VAL_3:.*]] = fir.extract_value %[[VAL_2]], [0 : index] : (!fir.complex<8>) -> f64
+! CHECK: %[[VAL_4:.*]] = fir.extract_value %[[VAL_2]], [1 : index] : (!fir.complex<8>) -> f64
+! CHECK: %[[VAL_5:.*]] = fir.call @__mth_i_dhypot(%[[VAL_3]], %[[VAL_4]]) : (f64, f64) -> f64
+! CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref<f64>
+! CHECK: return
+ complex(kind=8) :: a
+ real(kind=8) :: b
+ b = abs(a)
+end subroutine abs_testzd