--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -gvn --basic-aa < %s | FileCheck %s
+
+@u = global i32 5, align 4
+@w = global i32 10, align 4
+
+define i32 @test_load() {
+; CHECK-LABEL: @test_load(
+; CHECK-NEXT: [[LV:%.*]] = load volatile i32, i32* @u, align 4
+; CHECK-NEXT: ret i32 [[LV]]
+;
+; The two unordered atomic loads of @w are value-numbered to the same value
+; (the intervening volatile load is to a different global, @u), so %res.1
+; folds to 0 and GVN leaves only the volatile load, per the CHECK lines.
+  %l1 = load atomic i32, i32* @w unordered, align 4
+  %lv = load volatile i32, i32* @u, align 4
+  %l2 = load atomic i32, i32* @w unordered, align 4
+  %res.1 = sub i32 %l1, %l2
+  %res = add i32 %res.1, %lv
+  ret i32 %res
+}
+
+define i32 @test_load_with_acquire_load() {
+; CHECK-LABEL: @test_load_with_acquire_load(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w acquire, align 4
+; CHECK-NEXT: [[LV:%.*]] = load volatile i32, i32* @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w acquire, align 4
+; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+; Same shape as @test_load but with acquire ordering: the CHECK lines show
+; that GVN performs no elimination here — both acquire loads and the full
+; arithmetic chain are preserved (removing %l2 would drop its ordering).
+  %l1 = load atomic i32, i32* @w acquire, align 4
+  %lv = load volatile i32, i32* @u, align 4
+  %l2 = load atomic i32, i32* @w acquire, align 4
+  %res.1 = sub i32 %l1, %l2
+  %res = add i32 %res.1, %lv
+  ret i32 %res
+}
+
+define i32 @test_load_with_seq_cst_load() {
+; CHECK-LABEL: @test_load_with_seq_cst_load(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[LV:%.*]] = load volatile i32, i32* @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+; Same shape as @test_load but with seq_cst ordering: the CHECK lines show
+; GVN leaves the function untouched — neither load is merged and the sub/add
+; chain survives, as expected for the strongest atomic ordering.
+  %l1 = load atomic i32, i32* @w seq_cst, align 4
+  %lv = load volatile i32, i32* @u, align 4
+  %l2 = load atomic i32, i32* @w seq_cst, align 4
+  %res.1 = sub i32 %l1, %l2
+  %res = add i32 %res.1, %lv
+  ret i32 %res
+}
+
+define i32 @test_store(i32 %x) {
+; CHECK-LABEL: @test_store(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: store volatile i32 [[X:%.*]], i32* @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+; Unordered atomic loads of @w straddling a volatile store to @u: per the
+; CHECK lines GVN does NOT merge the two loads across the volatile store,
+; even though the store is to a different global (volatile acts as a
+; barrier to this elimination here).
+  %l1 = load atomic i32, i32* @w unordered, align 4
+  store volatile i32 %x, i32* @u, align 4
+  %l2 = load atomic i32, i32* @w unordered, align 4
+  %res = sub i32 %l1, %l2
+  ret i32 %res
+}
+
+define i32 @test_store_with_acquire_load(i32 %x) {
+; CHECK-LABEL: @test_store_with_acquire_load(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w acquire, align 4
+; CHECK-NEXT: store volatile i32 [[X:%.*]], i32* @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w acquire, align 4
+; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+; Acquire variant of @test_store: the CHECK lines show no transformation —
+; both acquire loads and the volatile store are preserved in order.
+  %l1 = load atomic i32, i32* @w acquire, align 4
+  store volatile i32 %x, i32* @u, align 4
+  %l2 = load atomic i32, i32* @w acquire, align 4
+  %res = sub i32 %l1, %l2
+  ret i32 %res
+}
+
+define i32 @test_store_with_seq_cst_load(i32 %x) {
+; CHECK-LABEL: @test_store_with_seq_cst_load(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: store volatile i32 [[X:%.*]], i32* @u, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+; Seq_cst variant of @test_store: the CHECK lines show GVN leaves the
+; function untouched — no load merging across the volatile store.
+  %l1 = load atomic i32, i32* @w seq_cst, align 4
+  store volatile i32 %x, i32* @u, align 4
+  %l2 = load atomic i32, i32* @w seq_cst, align 4
+  %res = sub i32 %l1, %l2
+  ret i32 %res
+}