// TODO: Add feature constraints to the remaining builtins.
TARGET_BUILTIN(__builtin_loongarch_dbar, "vIUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_crc_w_d_w, "iLii", "nc", "64bit")
+
#undef BUILTIN
#undef TARGET_BUILTIN
"a randomized struct can only be initialized with a designated initializer">;
def err_cast_from_randomized_struct : Error<
"casting from randomized structure pointer type %0 to %1">;
+
+// LoongArch-specific Diagnostics
+def err_loongarch_builtin_requires_la64 : Error<
+ "this builtin requires target: loongarch64">;
} // end of sema component.
bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
+ bool CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
#include "clang/Basic/BuiltinsLoongArch.def"
};
+// Seed the LoongArch feature map before generic processing: a loongarch64
+// triple unconditionally implies the "64bit" feature, which is used to gate
+// 64-bit-only builtins such as __builtin_loongarch_crc_w_d_w.  Remaining
+// CPU- and command-line-derived features are applied by the base class.
+bool LoongArchTargetInfo::initFeatureMap(
+    llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+    const std::vector<std::string> &FeaturesVec) const {
+  if (getTriple().getArch() == llvm::Triple::loongarch64)
+    Features["64bit"] = true;
+
+  return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+}
+
+/// Return true if the target has the given feature.  For now all supported
+/// feature names are derived purely from the triple's register width
+/// (loongarch32 vs. loongarch64); unknown names answer false.
+bool LoongArchTargetInfo::hasFeature(StringRef Feature) const {
+  bool Is64Bit = getTriple().getArch() == llvm::Triple::loongarch64;
+  // TODO: Handle more features.
+  return llvm::StringSwitch<bool>(Feature)
+      .Case("loongarch32", !Is64Bit)
+      .Case("loongarch64", Is64Bit)
+      .Case("32bit", !Is64Bit)
+      .Case("64bit", Is64Bit)
+      .Default(false);
+}
+
ArrayRef<Builtin::Info> LoongArchTargetInfo::getTargetBuiltins() const {
return llvm::makeArrayRef(BuiltinInfo, clang::LoongArch::LastTSBuiltin -
Builtin::FirstTSBuiltin);
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
+
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override;
+
+ bool hasFeature(StringRef Feature) const override;
};
class LLVM_LIBRARY_VISIBILITY LoongArch32TargetInfo
case LoongArch::BI__builtin_loongarch_dbar:
ID = Intrinsic::loongarch_dbar;
break;
+ case LoongArch::BI__builtin_loongarch_crc_w_d_w:
+ ID = Intrinsic::loongarch_crc_w_d_w;
+ break;
// TODO: Support more Intrinsics.
}
extern "C" {
#endif
+#if __loongarch_grlen == 64
+/* CRC check operation: wraps __builtin_loongarch_crc_w_d_w, which lowers to
+   the LA64-only crc.w.d.w instruction.  Only exposed when the
+   general-purpose registers are 64 bits wide (__loongarch_grlen == 64). */
+extern __inline int
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    __crc_w_d_w(long int _1, int _2) {
+  return (int)__builtin_loongarch_crc_w_d_w((long int)_1, (int)_2);
+}
+#endif
+
#define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar((_1))
#ifdef __cplusplus
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
+ case llvm::Triple::loongarch32:
+ case llvm::Triple::loongarch64:
+ return CheckLoongArchBuiltinFunctionCall(TI, BuiltinID, TheCall);
}
}
return CheckHexagonBuiltinArgument(BuiltinID, TheCall);
}
+// Target-specific semantic checking for LoongArch builtin calls.  Returns
+// true when a diagnostic was emitted (rejecting the call), false otherwise.
+bool Sema::CheckLoongArchBuiltinFunctionCall(const TargetInfo &TI,
+                                             unsigned BuiltinID,
+                                             CallExpr *TheCall) {
+  switch (BuiltinID) {
+  default:
+    break;
+  case LoongArch::BI__builtin_loongarch_crc_w_d_w:
+    // crc.w.d.w is an LA64-only instruction; reject the builtin unless the
+    // "64bit" target feature is present (set for loongarch64 triples in
+    // LoongArchTargetInfo::initFeatureMap).
+    if (!TI.hasFeature("64bit"))
+      return Diag(TheCall->getBeginLoc(),
+                  diag::err_loongarch_builtin_requires_la64)
+          << TheCall->getSourceRange();
+    break;
+  }
+
+  return false;
+}
+
bool Sema::CheckMipsBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID, CallExpr *TheCall) {
return CheckMipsBuiltinCpu(TI, BuiltinID, TheCall) ||
--- /dev/null
+// Diagnostics test: checks target-feature errors for LoongArch builtins.
+// RUN: %clang_cc1 -triple loongarch32 -emit-llvm -S -verify %s -o /dev/null
+
+#include <larchintrin.h>
+
+// The crc builtin requires the "64bit" feature; on a loongarch32 triple Sema
+// must emit err_loongarch_builtin_requires_la64.
+int crc_w_d_w(long int a, int b) {
+  return __builtin_loongarch_crc_w_d_w(a, b); // expected-error {{this builtin requires target: loongarch64}}
+}
--- /dev/null
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple loongarch64 -O2 -emit-llvm %s -o - | FileCheck %s
+
+#include <larchintrin.h>
+
+// On loongarch64 the builtin lowers directly to the target intrinsic
+// llvm.loongarch.crc.w.d.w.
+// CHECK-LABEL: @crc_w_d_w(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 @llvm.loongarch.crc.w.d.w(i64 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int crc_w_d_w(long int a, int b) {
+  return __builtin_loongarch_crc_w_d_w(a, b);
+}
// ptr addr, grlen cmpval, grlen newval, grlen mask, grlenimm ordering)
defm int_loongarch_masked_cmpxchg : MaskedAtomicRMWFiveOpIntrinsics;
+//===----------------------------------------------------------------------===//
+// LoongArch BASE
+
def int_loongarch_dbar : Intrinsic<[], [llvm_i32_ty]>;
+def int_loongarch_crc_w_d_w : Intrinsic<[llvm_i32_ty],
+ [llvm_i64_ty, llvm_i32_ty]>;
} // TargetPrefix = "loongarch"
setOperationAction(ISD::CTTZ, MVT::i32, Custom);
setOperationAction(ISD::CTLZ, MVT::i32, Custom);
setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
if (Subtarget.hasBasicF() && !Subtarget.hasBasicD())
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
if (Subtarget.hasBasicF())
setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
} else {
setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
+ setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
}
static const ISD::CondCode FPCCToExpand[] = {
return lowerGlobalTLSAddress(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN:
return lowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::INTRINSIC_W_CHAIN:
+ return lowerINTRINSIC_W_CHAIN(Op, DAG);
case ISD::INTRINSIC_VOID:
return lowerINTRINSIC_VOID(Op, DAG);
case ISD::BlockAddress:
}
}
+// Custom lowering for ISD::INTRINSIC_W_CHAIN nodes.  Intrinsics without a
+// special case here are returned unchanged for default handling.
+SDValue
+LoongArchTargetLowering::lowerINTRINSIC_W_CHAIN(SDValue Op,
+                                                SelectionDAG &DAG) const {
+
+  switch (Op.getConstantOperandVal(1)) {
+  default:
+    return Op;
+  case Intrinsic::loongarch_crc_w_d_w: {
+    // crc.w.d.w exists only on LA64; reaching this point means the intrinsic
+    // cannot be selected for the current target (see the loongarch32 llc
+    // error test).  Report a module-level error and return UNDEF merged with
+    // the incoming chain so lowering stays well-formed.
+    DAG.getContext()->emitError(
+        "llvm.loongarch.crc.w.d.w requires target: loongarch64");
+    return DAG.getMergeValues(
+        {DAG.getUNDEF(Op.getValueType()), Op.getOperand(0)}, SDLoc(Op));
+  }
+  }
+}
+
SDValue LoongArchTargetLowering::lowerINTRINSIC_VOID(SDValue Op,
SelectionDAG &DAG) const {
SDLoc DL(Op);
Results.push_back(customLegalizeToWOp(N, DAG, 1));
break;
}
+ case ISD::INTRINSIC_W_CHAIN: {
+ assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ "Unexpected custom legalisation");
+
+ switch (N->getConstantOperandVal(1)) {
+ default:
+ llvm_unreachable("Unexpected Intrinsic.");
+ case Intrinsic::loongarch_crc_w_d_w: {
+ Results.push_back(DAG.getNode(
+ ISD::TRUNCATE, DL, N->getValueType(0),
+ DAG.getNode(
+ LoongArchISD::CRC_W_D_W, DL, MVT::i64, N->getOperand(2),
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3)))));
+ Results.push_back(N->getOperand(0));
+ break;
+ }
+ }
+ break;
+ }
}
}
NODE_NAME_CASE(CLZ_W)
NODE_NAME_CASE(CTZ_W)
NODE_NAME_CASE(DBAR)
+ NODE_NAME_CASE(CRC_W_D_W)
}
#undef NODE_NAME_CASE
return nullptr;
// Intrinsic operations
DBAR,
+
+ // CRC check operations
+ CRC_W_D_W
};
} // end namespace LoongArchISD
SDValue lowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+ SDValue lowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
def loongarch_srl_w : SDNode<"LoongArchISD::SRL_W", SDT_LoongArchIntBinOpW>;
def loongarch_rotr_w : SDNode<"LoongArchISD::ROTR_W", SDT_LoongArchIntBinOpW>;
def loongarch_rotl_w : SDNode<"LoongArchISD::ROTL_W", SDT_LoongArchIntBinOpW>;
+def loongarch_crc_w_d_w
+ : SDNode<"LoongArchISD::CRC_W_D_W", SDT_LoongArchIntBinOpW>;
def loongarch_bstrins
: SDNode<"LoongArchISD::BSTRINS", SDT_LoongArchBStrIns>;
def loongarch_bstrpick
let Predicates = [IsLA64] in {
def : Pat<(loongarch_dbar uimm15:$imm15), (DBAR uimm15:$imm15)>;
+
+// CRC Check Instructions
+def : PatGprGpr<loongarch_crc_w_d_w, CRC_W_D_W>;
} // Predicates = [IsLA64]
/// Other pseudo-instructions
--- /dev/null
+; RUN: not llc --mtriple=loongarch32 --disable-verify < %s 2>&1 | FileCheck %s
+
+; llvm.loongarch.crc.w.d.w is only selectable on loongarch64.  On loongarch32
+; lowering emits a module-level error; "not llc" expects the failing exit
+; status and FileCheck matches the error text on stderr.
+declare i32 @llvm.loongarch.crc.w.d.w(i64, i32)
+
+define i32 @crc_w_d_w(i64 %a, i32 %b) nounwind {
+; CHECK: llvm.loongarch.crc.w.d.w requires target: loongarch64
+entry:
+  %res = call i32 @llvm.loongarch.crc.w.d.w(i64 %a, i32 %b)
+  ret i32 %res
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s
+
+; On loongarch64 the intrinsic selects directly to the crc.w.d.w instruction.
+declare i32 @llvm.loongarch.crc.w.d.w(i64, i32)
+
+define i32 @crc_w_d_w(i64 %a, i32 %b) nounwind {
+; CHECK-LABEL: crc_w_d_w:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    crc.w.d.w $a0, $a0, $a1
+; CHECK-NEXT:    ret
+  %res = call i32 @llvm.loongarch.crc.w.d.w(i64 %a, i32 %b)
+  ret i32 %res
+}