TARGET_BUILTIN(__builtin_loongarch_csrxchg_w, "UiUiUiIUi", "nc", "")
TARGET_BUILTIN(__builtin_loongarch_csrxchg_d, "ULiULiULiIUi", "nc", "64bit")
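+// Prototype encoding: 'v' = void, 'Ui' = unsigned int, 'ULi' = unsigned
+// long int; a leading 'I' marks an argument that must be a constant integer.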
+TARGET_BUILTIN(__builtin_loongarch_iocsrrd_b, "UiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrrd_h, "UiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrrd_w, "UiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrrd_d, "ULiUi", "nc", "64bit")
+TARGET_BUILTIN(__builtin_loongarch_iocsrwr_b, "vUiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrwr_h, "vUiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrwr_w, "vUiUi", "nc", "")
+TARGET_BUILTIN(__builtin_loongarch_iocsrwr_d, "vULiUi", "nc", "64bit")
+
#undef BUILTIN
#undef TARGET_BUILTIN
case LoongArch::BI__builtin_loongarch_csrxchg_d:
ID = Intrinsic::loongarch_csrxchg_d;
break;
+ case LoongArch::BI__builtin_loongarch_iocsrrd_b:
+ ID = Intrinsic::loongarch_iocsrrd_b;
+ break;
+ case LoongArch::BI__builtin_loongarch_iocsrrd_h:
+ ID = Intrinsic::loongarch_iocsrrd_h;
+ break;
+ case LoongArch::BI__builtin_loongarch_iocsrrd_w:
+ ID = Intrinsic::loongarch_iocsrrd_w;
+ break;
+ case LoongArch::BI__builtin_loongarch_iocsrrd_d:
+ ID = Intrinsic::loongarch_iocsrrd_d;
+ break;
+ case LoongArch::BI__builtin_loongarch_iocsrwr_b:
+ ID = Intrinsic::loongarch_iocsrwr_b;
+ break;
+ case LoongArch::BI__builtin_loongarch_iocsrwr_h:
+ ID = Intrinsic::loongarch_iocsrwr_h;
+ break;
+ case LoongArch::BI__builtin_loongarch_iocsrwr_w:
+ ID = Intrinsic::loongarch_iocsrwr_w;
+ break;
+ case LoongArch::BI__builtin_loongarch_iocsrwr_d:
+ ID = Intrinsic::loongarch_iocsrwr_d;
+ break;
// TODO: Support more Intrinsics.
}
(unsigned long int)(_1), (unsigned long int)(_2), (_3)))
#endif
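+/* IOCSR access: read from / write to an I/O space control and status
+   register.  The *_d variants are only available on LoongArch64.  */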
+extern __inline unsigned char
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_b(unsigned int _1) {
+ return (unsigned char)__builtin_loongarch_iocsrrd_b((unsigned int)_1);
+}
+
+extern __inline unsigned short
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_h(unsigned int _1) {
+ return (unsigned short)__builtin_loongarch_iocsrrd_h((unsigned int)_1);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_w(unsigned int _1) {
+ return (unsigned int)__builtin_loongarch_iocsrrd_w((unsigned int)_1);
+}
+
+#if __loongarch_grlen == 64
+extern __inline unsigned long int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_d(unsigned int _1) {
+ return (unsigned long int)__builtin_loongarch_iocsrrd_d((unsigned int)_1);
+}
+#endif
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_b(unsigned char _1, unsigned int _2) {
+ __builtin_loongarch_iocsrwr_b((unsigned char)_1, (unsigned int)_2);
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_h(unsigned short _1, unsigned int _2) {
+ __builtin_loongarch_iocsrwr_h((unsigned short)_1, (unsigned int)_2);
+}
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_w(unsigned int _1, unsigned int _2) {
+ __builtin_loongarch_iocsrwr_w((unsigned int)_1, (unsigned int)_2);
+}
+
+#if __loongarch_grlen == 64
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_d(unsigned long int _1, unsigned int _2) {
+ __builtin_loongarch_iocsrwr_d((unsigned long int)_1, (unsigned int)_2);
+}
+#endif
+
#ifdef __cplusplus
}
#endif
case LoongArch::BI__builtin_loongarch_crcc_w_h_w:
case LoongArch::BI__builtin_loongarch_crcc_w_w_w:
case LoongArch::BI__builtin_loongarch_crcc_w_d_w:
+ case LoongArch::BI__builtin_loongarch_iocsrrd_d:
+ case LoongArch::BI__builtin_loongarch_iocsrwr_d:
if (!TI.hasFeature("64bit"))
return Diag(TheCall->getBeginLoc(),
diag::err_loongarch_builtin_requires_la64)
__builtin_loongarch_csrxchg_w(a, b, -1); // expected-error {{argument value 4294967295 is outside the valid range [0, 16383]}}
__builtin_loongarch_csrxchg_w(a, b, b); // expected-error {{argument to '__builtin_loongarch_csrxchg_w' must be a constant integer}}
}
+
+unsigned long int iocsrrd_d(unsigned int a) {
+ return __builtin_loongarch_iocsrrd_d(a); // expected-error {{this builtin requires target: loongarch64}}
+}
+
+void iocsrwr_d(unsigned long int a, unsigned int b) {
+ __builtin_loongarch_iocsrwr_d(a, b); // expected-error {{this builtin requires target: loongarch64}}
+}
unsigned int d = __builtin_loongarch_csrxchg_w(a, b, 1);
return 0;
}
+
+// LA32-LABEL: @iocsrrd_b(
+// LA32-NEXT: entry:
+// LA32-NEXT: [[_1_ADDR_I:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[B:%.*]] = alloca i8, align 1
+// LA32-NEXT: [[C:%.*]] = alloca i8, align 1
+// LA32-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
+// LA32-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// LA32-NEXT: store i32 [[TMP0]], ptr [[_1_ADDR_I]], align 4
+// LA32-NEXT: [[TMP1:%.*]] = load i32, ptr [[_1_ADDR_I]], align 4
+// LA32-NEXT: [[TMP2:%.*]] = call i32 @llvm.loongarch.iocsrrd.b(i32 [[TMP1]])
+// LA32-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP2]] to i8
+// LA32-NEXT: store i8 [[CONV_I]], ptr [[B]], align 1
+// LA32-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// LA32-NEXT: [[TMP4:%.*]] = call i32 @llvm.loongarch.iocsrrd.b(i32 [[TMP3]])
+// LA32-NEXT: [[CONV:%.*]] = trunc i32 [[TMP4]] to i8
+// LA32-NEXT: store i8 [[CONV]], ptr [[C]], align 1
+// LA32-NEXT: ret i8 0
+//
+unsigned char iocsrrd_b(unsigned int a) {
+ unsigned char b = __iocsrrd_b(a);
+ unsigned char c = __builtin_loongarch_iocsrrd_b(a);
+ return 0;
+}
+
+// LA32-LABEL: @iocsrrd_h(
+// LA32-NEXT: entry:
+// LA32-NEXT: [[_1_ADDR_I:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[B:%.*]] = alloca i16, align 2
+// LA32-NEXT: [[C:%.*]] = alloca i16, align 2
+// LA32-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
+// LA32-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// LA32-NEXT: store i32 [[TMP0]], ptr [[_1_ADDR_I]], align 4
+// LA32-NEXT: [[TMP1:%.*]] = load i32, ptr [[_1_ADDR_I]], align 4
+// LA32-NEXT: [[TMP2:%.*]] = call i32 @llvm.loongarch.iocsrrd.h(i32 [[TMP1]])
+// LA32-NEXT: [[CONV_I:%.*]] = trunc i32 [[TMP2]] to i16
+// LA32-NEXT:    store i16 [[CONV_I]], ptr [[B]], align 2
+// LA32-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// LA32-NEXT: [[TMP4:%.*]] = call i32 @llvm.loongarch.iocsrrd.h(i32 [[TMP3]])
+// LA32-NEXT: [[CONV1:%.*]] = trunc i32 [[TMP4]] to i16
+// LA32-NEXT: store i16 [[CONV1]], ptr [[C]], align 2
+// LA32-NEXT: ret i16 0
+//
+unsigned short iocsrrd_h(unsigned int a) {
+ unsigned short b = __iocsrrd_h(a);
+ unsigned short c = __builtin_loongarch_iocsrrd_h(a);
+ return 0;
+}
+
+// LA32-LABEL: @iocsrrd_w(
+// LA32-NEXT: entry:
+// LA32-NEXT: [[_1_ADDR_I:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[B:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[C:%.*]] = alloca i32, align 4
+// LA32-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
+// LA32-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// LA32-NEXT: store i32 [[TMP0]], ptr [[_1_ADDR_I]], align 4
+// LA32-NEXT: [[TMP1:%.*]] = load i32, ptr [[_1_ADDR_I]], align 4
+// LA32-NEXT: [[TMP2:%.*]] = call i32 @llvm.loongarch.iocsrrd.w(i32 [[TMP1]])
+// LA32-NEXT: store i32 [[TMP2]], ptr [[B]], align 4
+// LA32-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// LA32-NEXT: [[TMP4:%.*]] = call i32 @llvm.loongarch.iocsrrd.w(i32 [[TMP3]])
+// LA32-NEXT: store i32 [[TMP4]], ptr [[C]], align 4
+// LA32-NEXT: ret i32 0
+//
+unsigned int iocsrrd_w(unsigned int a) {
+ unsigned int b = __iocsrrd_w(a);
+ unsigned int c = __builtin_loongarch_iocsrrd_w(a);
+ return 0;
+}
+
+// LA32-LABEL: @iocsrwr_b(
+// LA32-NEXT: entry:
+// LA32-NEXT: [[_1_ADDR_I:%.*]] = alloca i8, align 1
+// LA32-NEXT: [[_2_ADDR_I:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[A_ADDR:%.*]] = alloca i8, align 1
+// LA32-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// LA32-NEXT: store i8 [[A:%.*]], ptr [[A_ADDR]], align 1
+// LA32-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
+// LA32-NEXT: [[TMP0:%.*]] = load i8, ptr [[A_ADDR]], align 1
+// LA32-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// LA32-NEXT: store i8 [[TMP0]], ptr [[_1_ADDR_I]], align 1
+// LA32-NEXT: store i32 [[TMP1]], ptr [[_2_ADDR_I]], align 4
+// LA32-NEXT: [[TMP2:%.*]] = load i8, ptr [[_1_ADDR_I]], align 1
+// LA32-NEXT: [[CONV_I:%.*]] = zext i8 [[TMP2]] to i32
+// LA32-NEXT: [[TMP3:%.*]] = load i32, ptr [[_2_ADDR_I]], align 4
+// LA32-NEXT: call void @llvm.loongarch.iocsrwr.b(i32 [[CONV_I]], i32 [[TMP3]])
+// LA32-NEXT: [[TMP4:%.*]] = load i8, ptr [[A_ADDR]], align 1
+// LA32-NEXT: [[CONV:%.*]] = zext i8 [[TMP4]] to i32
+// LA32-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// LA32-NEXT: call void @llvm.loongarch.iocsrwr.b(i32 [[CONV]], i32 [[TMP5]])
+// LA32-NEXT: ret void
+//
+void iocsrwr_b(unsigned char a, unsigned int b) {
+ __iocsrwr_b(a, b);
+ __builtin_loongarch_iocsrwr_b(a, b);
+}
+
+// LA32-LABEL: @iocsrwr_h(
+// LA32-NEXT: entry:
+// LA32-NEXT: [[_1_ADDR_I:%.*]] = alloca i16, align 2
+// LA32-NEXT: [[_2_ADDR_I:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[A_ADDR:%.*]] = alloca i16, align 2
+// LA32-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// LA32-NEXT: store i16 [[A:%.*]], ptr [[A_ADDR]], align 2
+// LA32-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
+// LA32-NEXT: [[TMP0:%.*]] = load i16, ptr [[A_ADDR]], align 2
+// LA32-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// LA32-NEXT: store i16 [[TMP0]], ptr [[_1_ADDR_I]], align 2
+// LA32-NEXT: store i32 [[TMP1]], ptr [[_2_ADDR_I]], align 4
+// LA32-NEXT: [[TMP2:%.*]] = load i16, ptr [[_1_ADDR_I]], align 2
+// LA32-NEXT: [[CONV_I:%.*]] = zext i16 [[TMP2]] to i32
+// LA32-NEXT: [[TMP3:%.*]] = load i32, ptr [[_2_ADDR_I]], align 4
+// LA32-NEXT: call void @llvm.loongarch.iocsrwr.h(i32 [[CONV_I]], i32 [[TMP3]])
+// LA32-NEXT: [[TMP4:%.*]] = load i16, ptr [[A_ADDR]], align 2
+// LA32-NEXT: [[CONV:%.*]] = zext i16 [[TMP4]] to i32
+// LA32-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// LA32-NEXT: call void @llvm.loongarch.iocsrwr.h(i32 [[CONV]], i32 [[TMP5]])
+// LA32-NEXT: ret void
+//
+void iocsrwr_h(unsigned short a, unsigned int b) {
+ __iocsrwr_h(a, b);
+ __builtin_loongarch_iocsrwr_h(a, b);
+}
+
+// LA32-LABEL: @iocsrwr_w(
+// LA32-NEXT: entry:
+// LA32-NEXT: [[_1_ADDR_I:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[_2_ADDR_I:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
+// LA32-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
+// LA32-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
+// LA32-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
+// LA32-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// LA32-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// LA32-NEXT: store i32 [[TMP0]], ptr [[_1_ADDR_I]], align 4
+// LA32-NEXT: store i32 [[TMP1]], ptr [[_2_ADDR_I]], align 4
+// LA32-NEXT: [[TMP2:%.*]] = load i32, ptr [[_1_ADDR_I]], align 4
+// LA32-NEXT: [[TMP3:%.*]] = load i32, ptr [[_2_ADDR_I]], align 4
+// LA32-NEXT: call void @llvm.loongarch.iocsrwr.w(i32 [[TMP2]], i32 [[TMP3]])
+// LA32-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
+// LA32-NEXT: [[TMP5:%.*]] = load i32, ptr [[B_ADDR]], align 4
+// LA32-NEXT: call void @llvm.loongarch.iocsrwr.w(i32 [[TMP4]], i32 [[TMP5]])
+// LA32-NEXT: ret void
+//
+void iocsrwr_w(unsigned int a, unsigned int b) {
+ __iocsrwr_w(a, b);
+ __builtin_loongarch_iocsrwr_w(a, b);
+}
unsigned long int d = __builtin_loongarch_csrxchg_d(a, b, 1);
return 0;
}
+
+// CHECK-LABEL: @iocsrrd_b(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.b(i32 [[A:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.b(i32 [[A]])
+// CHECK-NEXT: ret i8 0
+//
+unsigned char iocsrrd_b(unsigned int a) {
+ unsigned char b = __iocsrrd_b(a);
+ unsigned char c = __builtin_loongarch_iocsrrd_b(a);
+ return 0;
+}
+
+// CHECK-LABEL: @iocsrrd_h(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.h(i32 [[A:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.h(i32 [[A]])
+// CHECK-NEXT: ret i16 0
+//
+unsigned short iocsrrd_h(unsigned int a) {
+ unsigned short b = __iocsrrd_h(a);
+ unsigned short c = __builtin_loongarch_iocsrrd_h(a);
+ return 0;
+}
+
+// CHECK-LABEL: @iocsrrd_w(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.w(i32 [[A:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = tail call i32 @llvm.loongarch.iocsrrd.w(i32 [[A]])
+// CHECK-NEXT: ret i32 0
+//
+unsigned int iocsrrd_w(unsigned int a) {
+ unsigned int b = __iocsrrd_w(a);
+ unsigned int c = __builtin_loongarch_iocsrrd_w(a);
+ return 0;
+}
+
+// CHECK-LABEL: @iocsrwr_b(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CONV_I:%.*]] = zext i8 [[A:%.*]] to i32
+// CHECK-NEXT: tail call void @llvm.loongarch.iocsrwr.b(i32 [[CONV_I]], i32 [[B:%.*]])
+// CHECK-NEXT: tail call void @llvm.loongarch.iocsrwr.b(i32 [[CONV_I]], i32 [[B]])
+// CHECK-NEXT: ret void
+//
+void iocsrwr_b(unsigned char a, unsigned int b) {
+ __iocsrwr_b(a, b);
+ __builtin_loongarch_iocsrwr_b(a, b);
+}
+
+// CHECK-LABEL: @iocsrwr_h(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CONV_I:%.*]] = zext i16 [[A:%.*]] to i32
+// CHECK-NEXT: tail call void @llvm.loongarch.iocsrwr.h(i32 [[CONV_I]], i32 [[B:%.*]])
+// CHECK-NEXT: tail call void @llvm.loongarch.iocsrwr.h(i32 [[CONV_I]], i32 [[B]])
+// CHECK-NEXT: ret void
+//
+void iocsrwr_h(unsigned short a, unsigned int b) {
+ __iocsrwr_h(a, b);
+ __builtin_loongarch_iocsrwr_h(a, b);
+}
+
+// CHECK-LABEL: @iocsrwr_w(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @llvm.loongarch.iocsrwr.w(i32 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-NEXT: tail call void @llvm.loongarch.iocsrwr.w(i32 [[A]], i32 [[B]])
+// CHECK-NEXT: ret void
+//
+void iocsrwr_w(unsigned int a, unsigned int b) {
+ __iocsrwr_w(a, b);
+ __builtin_loongarch_iocsrwr_w(a, b);
+}
+
+// CHECK-LABEL: @iocsrrd_d(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.loongarch.iocsrrd.d(i32 [[A:%.*]])
+// CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.loongarch.iocsrrd.d(i32 [[A]])
+// CHECK-NEXT: ret i64 0
+//
+unsigned long int iocsrrd_d(unsigned int a) {
+ unsigned long int b = __iocsrrd_d(a);
+ unsigned long int c = __builtin_loongarch_iocsrrd_d(a);
+ return 0;
+}
+
+// CHECK-LABEL: @iocsrwr_d(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @llvm.loongarch.iocsrwr.d(i64 [[A:%.*]], i32 [[B:%.*]])
+// CHECK-NEXT: tail call void @llvm.loongarch.iocsrwr.d(i64 [[A]], i32 [[B]])
+// CHECK-NEXT: ret void
+//
+void iocsrwr_d(unsigned long int a, unsigned int b) {
+ __iocsrwr_d(a, b);
+ __builtin_loongarch_iocsrwr_d(a, b);
+}
[llvm_i64_ty, llvm_i64_ty,
llvm_i32_ty],
[ImmArg<ArgIndex<2>>]>;
+
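+// I/O CSR read/write intrinsics. Unlike csrrd/csrwr/csrxchg, the IOCSR
+// address is a run-time register operand, so no ImmArg is required.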
+def int_loongarch_iocsrrd_b : Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>;
+def int_loongarch_iocsrrd_h : Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>;
+def int_loongarch_iocsrrd_w : Intrinsic<[llvm_i32_ty], [llvm_i32_ty]>;
+def int_loongarch_iocsrrd_d : Intrinsic<[llvm_i64_ty], [llvm_i32_ty]>;
+
+def int_loongarch_iocsrwr_b : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty]>;
+def int_loongarch_iocsrwr_h : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty]>;
+def int_loongarch_iocsrwr_w : Intrinsic<[], [llvm_i32_ty, llvm_i32_ty]>;
+def int_loongarch_iocsrwr_d : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty]>;
} // TargetPrefix = "loongarch"
setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
+  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
}
static const ISD::CondCode FPCCToExpand[] = {
Op0},
DL);
}
+ case Intrinsic::loongarch_iocsrrd_d: {
+ if (Subtarget.is64Bit())
+ return DAG.getMergeValues(
+ {DAG.getNode(
+ LoongArchISD::IOCSRRD_D, DL, GRLenVT, Op0,
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(2))),
+ Op0},
+ DL);
+ else {
+ DAG.getContext()->emitError(
+ "llvm.loongarch.crc.w.d.w requires target: loongarch64");
+ return DAG.getMergeValues({DAG.getUNDEF(Op.getValueType()), Op0}, DL);
+ }
+ }
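+// Lower an iocsrrd.{b/h/w} intrinsic to its target node, merging the value
+// read with the incoming chain.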
+#define IOCSRRD_CASE(NAME, NODE) \
+ case Intrinsic::loongarch_##NAME: { \
+ return DAG.getMergeValues( \
+ {DAG.getNode(LoongArchISD::NODE, DL, GRLenVT, Op0, Op.getOperand(2)), \
+ Op0}, \
+ DL); \
+ }
+ IOCSRRD_CASE(iocsrrd_b, IOCSRRD_B);
+ IOCSRRD_CASE(iocsrrd_h, IOCSRRD_H);
+  IOCSRRD_CASE(iocsrrd_w, IOCSRRD_W);
+#undef IOCSRRD_CASE
}
}
return DAG.getNode(LoongArchISD::SYSCALL, DL, MVT::Other, Op0,
DAG.getConstant(Imm, DL, GRLenVT));
}
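+// Lower an iocsrwr.{b/h/w} intrinsic; on LA64 the i32 value and address
+// operands are any-extended to i64 to match the GRLen-wide target node.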
+#define IOCSRWR_CASE(NAME, NODE) \
+ case Intrinsic::loongarch_##NAME: { \
+ SDValue Op3 = Op.getOperand(3); \
+ if (Subtarget.is64Bit()) \
+ return DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Op0, \
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2), \
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op3)); \
+ else \
+ return DAG.getNode(LoongArchISD::NODE, DL, MVT::Other, Op0, Op2, Op3); \
+ }
+ IOCSRWR_CASE(iocsrwr_b, IOCSRWR_B);
+ IOCSRWR_CASE(iocsrwr_h, IOCSRWR_H);
+  IOCSRWR_CASE(iocsrwr_w, IOCSRWR_W);
+#undef IOCSRWR_CASE
+ case Intrinsic::loongarch_iocsrwr_d: {
+ if (Subtarget.is64Bit())
+ return DAG.getNode(
+ LoongArchISD::IOCSRWR_D, DL, MVT::Other, Op0, Op2,
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op.getOperand(3)));
+ else {
+ DAG.getContext()->emitError(
+ "llvm.loongarch.iocsrwr.d requires target: loongarch64");
+ return Op.getOperand(0);
+ }
+ }
}
}
CSR_CASE(csrrd_d);
CSR_CASE(csrwr_d);
CSR_CASE(csrxchg_d);
+ CSR_CASE(iocsrrd_d);
case Intrinsic::loongarch_csrrd_w: {
unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
if (!isUInt<14>(Imm)) {
Results.push_back(N->getOperand(0));
break;
}
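+// On LA64 the i32-typed iocsrrd.{b/h/w} results are legalized here: perform
+// the read on i64 and truncate the result back to i32.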
+#define IOCSRRD_CASE(NAME, NODE) \
+ case Intrinsic::loongarch_##NAME: { \
+ Results.push_back(DAG.getNode( \
+ ISD::TRUNCATE, DL, N->getValueType(0), \
+ DAG.getNode(LoongArchISD::NODE, DL, MVT::i64, Op0, \
+ DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op2)))); \
+ Results.push_back(N->getOperand(0)); \
+ break; \
+ }
+ IOCSRRD_CASE(iocsrrd_b, IOCSRRD_B);
+ IOCSRRD_CASE(iocsrrd_h, IOCSRRD_H);
+  IOCSRRD_CASE(iocsrrd_w, IOCSRRD_W);
+#undef IOCSRRD_CASE
}
break;
}
NODE_NAME_CASE(CSRRD)
NODE_NAME_CASE(CSRWR)
NODE_NAME_CASE(CSRXCHG)
+ NODE_NAME_CASE(IOCSRRD_B)
+ NODE_NAME_CASE(IOCSRRD_H)
+ NODE_NAME_CASE(IOCSRRD_W)
+ NODE_NAME_CASE(IOCSRRD_D)
+ NODE_NAME_CASE(IOCSRWR_B)
+ NODE_NAME_CASE(IOCSRWR_H)
+ NODE_NAME_CASE(IOCSRWR_W)
+ NODE_NAME_CASE(IOCSRWR_D)
}
#undef NODE_NAME_CASE
return nullptr;
CSRRD,
CSRWR,
CSRXCHG,
+
+ // IOCSR access operations
+ IOCSRRD_B,
+  IOCSRRD_H,
+  IOCSRRD_W,
+ IOCSRRD_D,
+ IOCSRWR_B,
+ IOCSRWR_H,
+ IOCSRWR_W,
+ IOCSRWR_D,
};
} // end namespace LoongArchISD
SDTCisSameAs<0, 1>,
SDTCisSameAs<0, 2>,
SDTCisVT<3, GRLenVT>]>;
+def SDT_LoongArchIocsrwr : SDTypeProfile<0, 2, [SDTCisInt<0>,
+ SDTCisSameAs<0, 1>]>;
// TODO: Add LoongArch specific DAG Nodes
// Target-independent nodes, but with target-specific formats.
def loongarch_csrxchg : SDNode<"LoongArchISD::CSRXCHG",
SDT_LoongArchCsrxchg,
[SDNPHasChain, SDNPSideEffect]>;
+def loongarch_iocsrrd_b : SDNode<"LoongArchISD::IOCSRRD_B", SDTUnaryOp,
+ [SDNPHasChain, SDNPSideEffect]>;
+def loongarch_iocsrrd_h : SDNode<"LoongArchISD::IOCSRRD_H", SDTUnaryOp,
+ [SDNPHasChain, SDNPSideEffect]>;
+def loongarch_iocsrrd_w : SDNode<"LoongArchISD::IOCSRRD_W", SDTUnaryOp,
+ [SDNPHasChain, SDNPSideEffect]>;
+def loongarch_iocsrrd_d : SDNode<"LoongArchISD::IOCSRRD_D", SDTUnaryOp,
+ [SDNPHasChain, SDNPSideEffect]>;
+def loongarch_iocsrwr_b : SDNode<"LoongArchISD::IOCSRWR_B",
+ SDT_LoongArchIocsrwr,
+ [SDNPHasChain, SDNPSideEffect]>;
+def loongarch_iocsrwr_h : SDNode<"LoongArchISD::IOCSRWR_H",
+ SDT_LoongArchIocsrwr,
+ [SDNPHasChain, SDNPSideEffect]>;
+def loongarch_iocsrwr_w : SDNode<"LoongArchISD::IOCSRWR_W",
+ SDT_LoongArchIocsrwr,
+ [SDNPHasChain, SDNPSideEffect]>;
+def loongarch_iocsrwr_d : SDNode<"LoongArchISD::IOCSRWR_D",
+ SDT_LoongArchIocsrwr,
+ [SDNPHasChain, SDNPSideEffect]>;
//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
(CSRWR GPR:$rd, uimm14:$imm14)>;
def : Pat<(loongarch_csrxchg GPR:$rd, GPR:$rj, uimm14:$imm14),
(CSRXCHG GPR:$rd, GPR:$rj, uimm14:$imm14)>;
+
+def : Pat<(loongarch_iocsrrd_b GPR:$rj), (IOCSRRD_B GPR:$rj)>;
+def : Pat<(loongarch_iocsrrd_h GPR:$rj), (IOCSRRD_H GPR:$rj)>;
+def : Pat<(loongarch_iocsrrd_w GPR:$rj), (IOCSRRD_W GPR:$rj)>;
+
+def : Pat<(loongarch_iocsrwr_b GPR:$rd, GPR:$rj), (IOCSRWR_B GPR:$rd, GPR:$rj)>;
+def : Pat<(loongarch_iocsrwr_h GPR:$rd, GPR:$rj), (IOCSRWR_H GPR:$rd, GPR:$rj)>;
+def : Pat<(loongarch_iocsrwr_w GPR:$rd, GPR:$rj), (IOCSRWR_W GPR:$rd, GPR:$rj)>;
+
+let Predicates = [IsLA64] in {
+def : Pat<(loongarch_iocsrrd_d GPR:$rj), (IOCSRRD_D GPR:$rj)>;
+def : Pat<(loongarch_iocsrwr_d GPR:$rd, GPR:$rj), (IOCSRWR_D GPR:$rd, GPR:$rj)>;
+} // Predicates = [IsLA64]
declare i64 @llvm.loongarch.csrrd.d(i32 immarg)
declare i64 @llvm.loongarch.csrwr.d(i64, i32 immarg)
declare i64 @llvm.loongarch.csrxchg.d(i64, i64, i32 immarg)
+declare i64 @llvm.loongarch.iocsrrd.d(i32)
+declare void @llvm.loongarch.iocsrwr.d(i64, i32)
define i32 @crc_w_b_w(i32 %a, i32 %b) nounwind {
; CHECK: llvm.loongarch.crc.w.b.w requires target: loongarch64
%0 = tail call i64 @llvm.loongarch.csrxchg.d(i64 %a, i64 %b, i32 1)
ret i64 %0
}
+
+define i64 @iocsrrd_d(i32 %a) {
+; CHECK: llvm.loongarch.iocsrrd.d requires target: loongarch64
+entry:
+ %0 = tail call i64 @llvm.loongarch.iocsrrd.d(i32 %a)
+ ret i64 %0
+}
+
+define void @iocsrwr_d(i64 %a, i32 signext %b) {
+; CHECK: llvm.loongarch.iocsrwr.d requires target: loongarch64
+entry:
+ tail call void @llvm.loongarch.iocsrwr.d(i64 %a, i32 %b)
+ ret void
+}
declare i64 @llvm.loongarch.csrrd.d(i32 immarg)
declare i64 @llvm.loongarch.csrwr.d(i64, i32 immarg)
declare i64 @llvm.loongarch.csrxchg.d(i64, i64, i32 immarg)
+declare i64 @llvm.loongarch.iocsrrd.d(i32)
+declare void @llvm.loongarch.iocsrwr.d(i64, i32)
define i32 @crc_w_b_w(i32 %a, i32 %b) nounwind {
; CHECK-LABEL: crc_w_b_w:
%0 = tail call i64 @llvm.loongarch.csrxchg.d(i64 %a, i64 %b, i32 1)
ret i64 %0
}
+
+define i64 @iocsrrd_d(i32 %a) {
+; CHECK-LABEL: iocsrrd_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: iocsrrd.d $a0, $a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i64 @llvm.loongarch.iocsrrd.d(i32 %a)
+ ret i64 %0
+}
+
+define void @iocsrwr_d(i64 %a, i32 signext %b) {
+; CHECK-LABEL: iocsrwr_d:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: iocsrwr.d $a0, $a1
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.loongarch.iocsrwr.d(i64 %a, i32 %b)
+ ret void
+}
declare i32 @llvm.loongarch.csrrd.w(i32 immarg)
declare i32 @llvm.loongarch.csrwr.w(i32, i32 immarg)
declare i32 @llvm.loongarch.csrxchg.w(i32, i32, i32 immarg)
+declare i32 @llvm.loongarch.iocsrrd.b(i32)
+declare i32 @llvm.loongarch.iocsrrd.h(i32)
+declare i32 @llvm.loongarch.iocsrrd.w(i32)
+declare void @llvm.loongarch.iocsrwr.b(i32, i32)
+declare void @llvm.loongarch.iocsrwr.h(i32, i32)
+declare void @llvm.loongarch.iocsrwr.w(i32, i32)
define void @foo() nounwind {
; CHECK-LABEL: foo:
%0 = tail call i32 @llvm.loongarch.csrxchg.w(i32 %a, i32 %b, i32 1)
ret i32 %0
}
+
+define i32 @iocsrrd_b(i32 %a) {
+; CHECK-LABEL: iocsrrd_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: iocsrrd.b $a0, $a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i32 @llvm.loongarch.iocsrrd.b(i32 %a)
+ ret i32 %0
+}
+
+define i32 @iocsrrd_h(i32 %a) {
+; CHECK-LABEL: iocsrrd_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: iocsrrd.h $a0, $a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i32 @llvm.loongarch.iocsrrd.h(i32 %a)
+ ret i32 %0
+}
+
+define i32 @iocsrrd_w(i32 %a) {
+; CHECK-LABEL: iocsrrd_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: iocsrrd.w $a0, $a0
+; CHECK-NEXT: ret
+entry:
+ %0 = tail call i32 @llvm.loongarch.iocsrrd.w(i32 %a)
+ ret i32 %0
+}
+
+define void @iocsrwr_b(i32 %a, i32 %b) {
+; CHECK-LABEL: iocsrwr_b:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: iocsrwr.b $a0, $a1
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.loongarch.iocsrwr.b(i32 %a, i32 %b)
+ ret void
+}
+
+define void @iocsrwr_h(i32 %a, i32 %b) {
+; CHECK-LABEL: iocsrwr_h:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: iocsrwr.h $a0, $a1
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.loongarch.iocsrwr.h(i32 %a, i32 %b)
+ ret void
+}
+
+define void @iocsrwr_w(i32 %a, i32 %b) {
+; CHECK-LABEL: iocsrwr_w:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: iocsrwr.w $a0, $a1
+; CHECK-NEXT: ret
+entry:
+ tail call void @llvm.loongarch.iocsrwr.w(i32 %a, i32 %b)
+ ret void
+}