setBooleanContents(ZeroOrOneBooleanContent);
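+  // Atomic operations wider than GRLen (32 bits on LA32, 64 bits on LA64)
+  // are expanded to __atomic_* libcalls.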
+ setMaxAtomicSizeInBitsSupported(Subtarget.getGRLen());
+
// Function alignments.
const Align FunctionAlignment(4);
setMinFunctionAlignment(FunctionAlignment);
bool isFPImmLegal(const APFloat &Imm, EVT VT,
bool ForCodeSize) const override;
+
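+  // Instruct AtomicExpandPass to bracket atomic loads and stores with
+  // explicit fences (selected to DBAR) and relax their ordering to monotonic.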
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ return isa<LoadInst>(I) || isa<StoreInst>(I);
+ }
};
} // end namespace llvm
defm : StPat<store, ST_D, GPR, i64>;
} // Predicates = [IsLA64]
+/// Atomic loads and stores
+
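+// An atomic fence is selected to DBAR 0, the full completion barrier.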
+def : Pat<(atomic_fence timm, timm), (DBAR 0)>;
+
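+// These patterns rely on naturally-aligned loads and stores being
+// single-copy atomic on LoongArch, so monotonic atomic accesses select to
+// the plain instructions; ordering fences are inserted by AtomicExpandPass.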
+defm : LdPat<atomic_load_8, LD_B>;
+defm : LdPat<atomic_load_16, LD_H>;
+defm : LdPat<atomic_load_32, LD_W>;
+
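+// i8/i16 stores use GRLenVT so one pattern serves both LA32 and LA64, while
+// 32-bit stores need a per-target pattern for the stored value type.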
+defm : StPat<atomic_store_8, ST_B, GPR, GRLenVT>;
+defm : StPat<atomic_store_16, ST_H, GPR, GRLenVT>;
+defm : StPat<atomic_store_32, ST_W, GPR, i32>, Requires<[IsLA32]>;
+let Predicates = [IsLA64] in {
+defm : LdPat<atomic_load_64, LD_D>;
+defm : StPat<atomic_store_32, ST_W, GPR, i64>;
+defm : StPat<atomic_store_64, ST_D, GPR, i64>;
+} // Predicates = [IsLA64]
+
/// Other pseudo-instructions
// Pessimistically assume the stack pointer will be clobbered
return getTM<LoongArchTargetMachine>();
}
+ void addIRPasses() override;
bool addInstSelector() override;
};
} // namespace
return new LoongArchPassConfig(*this, PM);
}
+void LoongArchPassConfig::addIRPasses() {
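+  // Run AtomicExpandPass before instruction selection so unsupported atomics
+  // are rewritten into fences, monotonic accesses, or __atomic_* libcalls.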
+ addPass(createAtomicExpandPass());
+
+ TargetPassConfig::addIRPasses();
+}
+
bool LoongArchPassConfig::addInstSelector() {
addPass(createLoongArchISelDag(getLoongArchTargetMachine()));
--- /dev/null
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+define void @fence_acquire() nounwind {
+; LA32-LABEL: fence_acquire:
+; LA32: # %bb.0:
+; LA32-NEXT: dbar 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: fence_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: dbar 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ fence acquire
+ ret void
+}
+
+define void @fence_release() nounwind {
+; LA32-LABEL: fence_release:
+; LA32: # %bb.0:
+; LA32-NEXT: dbar 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: fence_release:
+; LA64: # %bb.0:
+; LA64-NEXT: dbar 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ fence release
+ ret void
+}
+
+define void @fence_acq_rel() nounwind {
+; LA32-LABEL: fence_acq_rel:
+; LA32: # %bb.0:
+; LA32-NEXT: dbar 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: fence_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: dbar 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ fence acq_rel
+ ret void
+}
+
+define void @fence_seq_cst() nounwind {
+; LA32-LABEL: fence_seq_cst:
+; LA32: # %bb.0:
+; LA32-NEXT: dbar 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: fence_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: dbar 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ fence seq_cst
+ ret void
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+define i8 @load_acquire_i8(ptr %ptr) {
+; LA32-LABEL: load_acquire_i8:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.b $a0, $a0, 0
+; LA32-NEXT: dbar 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: load_acquire_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.b $a0, $a0, 0
+; LA64-NEXT: dbar 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load atomic i8, ptr %ptr acquire, align 1
+ ret i8 %val
+}
+
+define i16 @load_acquire_i16(ptr %ptr) {
+; LA32-LABEL: load_acquire_i16:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.h $a0, $a0, 0
+; LA32-NEXT: dbar 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: load_acquire_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.h $a0, $a0, 0
+; LA64-NEXT: dbar 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load atomic i16, ptr %ptr acquire, align 2
+ ret i16 %val
+}
+
+define i32 @load_acquire_i32(ptr %ptr) {
+; LA32-LABEL: load_acquire_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: ld.w $a0, $a0, 0
+; LA32-NEXT: dbar 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: load_acquire_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.w $a0, $a0, 0
+; LA64-NEXT: dbar 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load atomic i32, ptr %ptr acquire, align 4
+ ret i32 %val
+}
+
+define i64 @load_acquire_i64(ptr %ptr) {
+; LA32-LABEL: load_acquire_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: ori $a1, $zero, 2
+; LA32-NEXT: bl __atomic_load_8
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: load_acquire_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: ld.d $a0, $a0, 0
+; LA64-NEXT: dbar 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ %val = load atomic i64, ptr %ptr acquire, align 8
+ ret i64 %val
+}
+
+define void @store_release_i8(ptr %ptr, i8 signext %v) {
+; LA32-LABEL: store_release_i8:
+; LA32: # %bb.0:
+; LA32-NEXT: dbar 0
+; LA32-NEXT: st.b $a1, $a0, 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: store_release_i8:
+; LA64: # %bb.0:
+; LA64-NEXT: dbar 0
+; LA64-NEXT: st.b $a1, $a0, 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ store atomic i8 %v, ptr %ptr release, align 1
+ ret void
+}
+
+define void @store_release_i16(ptr %ptr, i16 signext %v) {
+; LA32-LABEL: store_release_i16:
+; LA32: # %bb.0:
+; LA32-NEXT: dbar 0
+; LA32-NEXT: st.h $a1, $a0, 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: store_release_i16:
+; LA64: # %bb.0:
+; LA64-NEXT: dbar 0
+; LA64-NEXT: st.h $a1, $a0, 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ store atomic i16 %v, ptr %ptr release, align 2
+ ret void
+}
+
+define void @store_release_i32(ptr %ptr, i32 signext %v) {
+; LA32-LABEL: store_release_i32:
+; LA32: # %bb.0:
+; LA32-NEXT: dbar 0
+; LA32-NEXT: st.w $a1, $a0, 0
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: store_release_i32:
+; LA64: # %bb.0:
+; LA64-NEXT: dbar 0
+; LA64-NEXT: st.w $a1, $a0, 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ store atomic i32 %v, ptr %ptr release, align 4
+ ret void
+}
+
+define void @store_release_i64(ptr %ptr, i64 %v) {
+; LA32-LABEL: store_release_i64:
+; LA32: # %bb.0:
+; LA32-NEXT: addi.w $sp, $sp, -16
+; LA32-NEXT: .cfi_def_cfa_offset 16
+; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT: .cfi_offset 1, -4
+; LA32-NEXT: ori $a3, $zero, 3
+; LA32-NEXT: bl __atomic_store_8
+; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT: addi.w $sp, $sp, 16
+; LA32-NEXT: jirl $zero, $ra, 0
+;
+; LA64-LABEL: store_release_i64:
+; LA64: # %bb.0:
+; LA64-NEXT: dbar 0
+; LA64-NEXT: st.d $a1, $a0, 0
+; LA64-NEXT: jirl $zero, $ra, 0
+ store atomic i64 %v, ptr %ptr release, align 8
+ ret void
+}
--- /dev/null
+config.suffixes = ['.ll']
+
+targets = set(config.root.targets_to_build.split())
+if 'LoongArch' not in targets:
+ config.unsupported = True
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S --mtriple=loongarch32 --atomic-expand %s | FileCheck %s --check-prefix=LA32
+; RUN: opt -S --mtriple=loongarch64 --atomic-expand %s | FileCheck %s --check-prefix=LA64
+
+define i8 @load_acquire_i8(ptr %ptr) {
+; LA32-LABEL: @load_acquire_i8(
+; LA32-NEXT: [[VAL:%.*]] = load atomic i8, ptr [[PTR:%.*]] monotonic, align 1
+; LA32-NEXT: fence acquire
+; LA32-NEXT: ret i8 [[VAL]]
+;
+; LA64-LABEL: @load_acquire_i8(
+; LA64-NEXT: [[VAL:%.*]] = load atomic i8, ptr [[PTR:%.*]] monotonic, align 1
+; LA64-NEXT: fence acquire
+; LA64-NEXT: ret i8 [[VAL]]
+;
+ %val = load atomic i8, ptr %ptr acquire, align 1
+ ret i8 %val
+}
+
+define i16 @load_acquire_i16(ptr %ptr) {
+; LA32-LABEL: @load_acquire_i16(
+; LA32-NEXT: [[VAL:%.*]] = load atomic i16, ptr [[PTR:%.*]] monotonic, align 2
+; LA32-NEXT: fence acquire
+; LA32-NEXT: ret i16 [[VAL]]
+;
+; LA64-LABEL: @load_acquire_i16(
+; LA64-NEXT: [[VAL:%.*]] = load atomic i16, ptr [[PTR:%.*]] monotonic, align 2
+; LA64-NEXT: fence acquire
+; LA64-NEXT: ret i16 [[VAL]]
+;
+ %val = load atomic i16, ptr %ptr acquire, align 2
+ ret i16 %val
+}
+
+define i32 @load_acquire_i32(ptr %ptr) {
+; LA32-LABEL: @load_acquire_i32(
+; LA32-NEXT: [[VAL:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
+; LA32-NEXT: fence acquire
+; LA32-NEXT: ret i32 [[VAL]]
+;
+; LA64-LABEL: @load_acquire_i32(
+; LA64-NEXT: [[VAL:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
+; LA64-NEXT: fence acquire
+; LA64-NEXT: ret i32 [[VAL]]
+;
+ %val = load atomic i32, ptr %ptr acquire, align 4
+ ret i32 %val
+}
+
+define i64 @load_acquire_i64(ptr %ptr) {
+; LA32-LABEL: @load_acquire_i64(
+; LA32-NEXT: [[TMP1:%.*]] = call i64 @__atomic_load_8(ptr [[PTR:%.*]], i32 2)
+; LA32-NEXT: ret i64 [[TMP1]]
+;
+; LA64-LABEL: @load_acquire_i64(
+; LA64-NEXT: [[VAL:%.*]] = load atomic i64, ptr [[PTR:%.*]] monotonic, align 8
+; LA64-NEXT: fence acquire
+; LA64-NEXT: ret i64 [[VAL]]
+;
+ %val = load atomic i64, ptr %ptr acquire, align 8
+ ret i64 %val
+}
+
+define void @store_release_i8(ptr %ptr, i8 signext %v) {
+; LA32-LABEL: @store_release_i8(
+; LA32-NEXT: fence release
+; LA32-NEXT: store atomic i8 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 1
+; LA32-NEXT: ret void
+;
+; LA64-LABEL: @store_release_i8(
+; LA64-NEXT: fence release
+; LA64-NEXT: store atomic i8 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 1
+; LA64-NEXT: ret void
+;
+ store atomic i8 %v, ptr %ptr release, align 1
+ ret void
+}
+
+define void @store_release_i16(ptr %ptr, i16 signext %v) {
+; LA32-LABEL: @store_release_i16(
+; LA32-NEXT: fence release
+; LA32-NEXT: store atomic i16 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 2
+; LA32-NEXT: ret void
+;
+; LA64-LABEL: @store_release_i16(
+; LA64-NEXT: fence release
+; LA64-NEXT: store atomic i16 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 2
+; LA64-NEXT: ret void
+;
+ store atomic i16 %v, ptr %ptr release, align 2
+ ret void
+}
+
+define void @store_release_i32(ptr %ptr, i32 signext %v) {
+; LA32-LABEL: @store_release_i32(
+; LA32-NEXT: fence release
+; LA32-NEXT: store atomic i32 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 4
+; LA32-NEXT: ret void
+;
+; LA64-LABEL: @store_release_i32(
+; LA64-NEXT: fence release
+; LA64-NEXT: store atomic i32 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 4
+; LA64-NEXT: ret void
+;
+ store atomic i32 %v, ptr %ptr release, align 4
+ ret void
+}
+
+define void @store_release_i64(ptr %ptr, i64 %v) {
+; LA32-LABEL: @store_release_i64(
+; LA32-NEXT: call void @__atomic_store_8(ptr [[PTR:%.*]], i64 [[V:%.*]], i32 3)
+; LA32-NEXT: ret void
+;
+; LA64-LABEL: @store_release_i64(
+; LA64-NEXT: fence release
+; LA64-NEXT: store atomic i64 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 8
+; LA64-NEXT: ret void
+;
+ store atomic i64 %v, ptr %ptr release, align 8
+ ret void
+}