From: David Green
Date: Fri, 24 Sep 2021 08:08:59 +0000 (+0100)
Subject: [AArch64] Rewrite ldst-unsignedimm.ll codegen test.
X-Git-Tag: upstream/15.0.7~30636
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=2b23db78a33c2a6852ca098ebfe9b5636ef7d980;p=platform%2Fupstream%2Fllvm.git

[AArch64] Rewrite ldst-unsignedimm.ll codegen test.

Instead of relying on many volatile loads/stores in a single function,
rewrite the test to use separate functions as any other test would.
---

diff --git a/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll b/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
index 53e8414..76224e8 100644
--- a/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
+++ b/llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll
@@ -1,5 +1,6 @@
-; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
-; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefix=CHECK-NOFP %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -o - %s -mtriple=aarch64-none-linux-gnu | FileCheck --check-prefixes=CHECK,CHECK-FP %s
+; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu -mattr=-fp-armv8 | FileCheck --check-prefixes=CHECK,CHECK-NOFP %s

 @var_8bit = dso_local global i8 0
 @var_16bit = dso_local global i16 0
@@ -9,148 +10,210 @@
 @var_float = dso_local global float 0.0
 @var_double = dso_local global double 0.0

-define dso_local void @ldst_8bit() {
-; CHECK-LABEL: ldst_8bit:
-
-; No architectural support for loads to 16-bit or 8-bit since we
-; promote i8 during lowering.
-
-; match a sign-extending load 8-bit -> 32-bit
- %val8_sext32 = load volatile i8, i8* @var_8bit
+define i32 @ld_s8_32() {
+; CHECK-LABEL: ld_s8_32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_8bit
+; CHECK-NEXT: ldrsb w0, [x8, :lo12:var_8bit]
+; CHECK-NEXT: ret
+ %val8_sext32 = load i8, i8* @var_8bit
 %val32_signed = sext i8 %val8_sext32 to i32
- store volatile i32 %val32_signed, i32* @var_32bit
-; CHECK: adrp {{x[0-9]+}}, var_8bit
-; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
+ ret i32 %val32_signed
+}

-; match a zero-extending load volatile 8-bit -> 32-bit
- %val8_zext32 = load volatile i8, i8* @var_8bit
+define i32 @ld_u8_32() {
+; CHECK-LABEL: ld_u8_32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_8bit
+; CHECK-NEXT: ldrb w0, [x8, :lo12:var_8bit]
+; CHECK-NEXT: ret
+ %val8_zext32 = load i8, i8* @var_8bit
 %val32_unsigned = zext i8 %val8_zext32 to i32
- store volatile i32 %val32_unsigned, i32* @var_32bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
-
-; match an any-extending load volatile 8-bit -> 32-bit
- %val8_anyext = load volatile i8, i8* @var_8bit
- %newval8 = add i8 %val8_anyext, 1
- store volatile i8 %newval8, i8* @var_8bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
+ ret i32 %val32_unsigned
+}

-; match a sign-extending load volatile 8-bit -> 64-bit
- %val8_sext64 = load volatile i8, i8* @var_8bit
+define i64 @ld_s8_64() {
+; CHECK-LABEL: ld_s8_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_8bit
+; CHECK-NEXT: ldrsb x0, [x8, :lo12:var_8bit]
+; CHECK-NEXT: ret
+ %val8_sext64 = load i8, i8* @var_8bit
 %val64_signed = sext i8 %val8_sext64 to i64
- store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsb {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
+ ret i64 %val64_signed
+}

-; match a zero-extending load volatile 8-bit -> 64-bit.
-; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
-; of x0 so it's identical to load volatileing to 32-bits.
- %val8_zext64 = load volatile i8, i8* @var_8bit
+define i64 @ld_u8_64() {
+; CHECK-LABEL: ld_u8_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_8bit
+; CHECK-NEXT: ldrb w0, [x8, :lo12:var_8bit]
+; CHECK-NEXT: ret
+ %val8_zext64 = load i8, i8* @var_8bit
 %val64_unsigned = zext i8 %val8_zext64 to i64
- store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
+ ret i64 %val64_unsigned
+}

-; truncating store volatile 32-bits to 8-bits
- %val32 = load volatile i32, i32* @var_32bit
+define i8 @ld_a8_8() {
+; CHECK-LABEL: ld_a8_8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_8bit
+; CHECK-NEXT: ldrb w8, [x8, :lo12:var_8bit]
+; CHECK-NEXT: add w0, w8, #1
+; CHECK-NEXT: ret
+ %val8_anyext = load i8, i8* @var_8bit
+ %newval8 = add i8 %val8_anyext, 1
+ ret i8 %newval8
+}
+
+define void @st_i32_8(i32 %val32) {
+; CHECK-LABEL: st_i32_8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_8bit
+; CHECK-NEXT: strb w0, [x8, :lo12:var_8bit]
+; CHECK-NEXT: ret
 %val8_trunc32 = trunc i32 %val32 to i8
- store volatile i8 %val8_trunc32, i8* @var_8bit
-; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
+ store i8 %val8_trunc32, i8* @var_8bit
+ ret void
+}

-; truncating store volatile 64-bits to 8-bits
- %val64 = load volatile i64, i64* @var_64bit
+define void @st_i64_8(i64 %val64) {
+; CHECK-LABEL: st_i64_8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_8bit
+; CHECK-NEXT: strb w0, [x8, :lo12:var_8bit]
+; CHECK-NEXT: ret
 %val8_trunc64 = trunc i64 %val64 to i8
- store volatile i8 %val8_trunc64, i8* @var_8bit
-; CHECK: strb {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_8bit]
-
- ret void
+ store i8 %val8_trunc64, i8* @var_8bit
+ ret void
}

-define dso_local void @ldst_16bit() {
-; CHECK-LABEL: ldst_16bit:
-; No architectural support for load volatiles to 16-bit promote i16 during
-; lowering.
-
-; match a sign-extending load volatile 16-bit -> 32-bit
- %val16_sext32 = load volatile i16, i16* @var_16bit
- %val32_signed = sext i16 %val16_sext32 to i32
- store volatile i32 %val32_signed, i32* @var_32bit
-; CHECK: adrp {{x[0-9]+}}, var_16bit
-; CHECK: ldrsh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
+define i32 @ld_s16_32() {
+; CHECK-LABEL: ld_s16_32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_16bit
+; CHECK-NEXT: ldrsh w0, [x8, :lo12:var_16bit]
+; CHECK-NEXT: ret
+ %val16_sext32 = load i16, i16* @var_16bit
+ %val32_signed = sext i16 %val16_sext32 to i32
+ ret i32 %val32_signed
+}

-; match a zero-extending load volatile 16-bit -> 32-bit
- %val16_zext32 = load volatile i16, i16* @var_16bit
+define i32 @ld_u16_32() {
+; CHECK-LABEL: ld_u16_32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_16bit
+; CHECK-NEXT: ldrh w0, [x8, :lo12:var_16bit]
+; CHECK-NEXT: ret
+ %val16_zext32 = load i16, i16* @var_16bit
 %val32_unsigned = zext i16 %val16_zext32 to i32
- store volatile i32 %val32_unsigned, i32* @var_32bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
-
-; match an any-extending load volatile 16-bit -> 32-bit
- %val16_anyext = load volatile i16, i16* @var_16bit
- %newval16 = add i16 %val16_anyext, 1
- store volatile i16 %newval16, i16* @var_16bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
+ ret i32 %val32_unsigned
+}

-; match a sign-extending load volatile 16-bit -> 64-bit
- %val16_sext64 = load volatile i16, i16* @var_16bit
+define i64 @ld_s16_64() {
+; CHECK-LABEL: ld_s16_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_16bit
+; CHECK-NEXT: ldrsh x0, [x8, :lo12:var_16bit]
+; CHECK-NEXT: ret
+ %val16_sext64 = load i16, i16* @var_16bit
 %val64_signed = sext i16 %val16_sext64 to i64
- store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
+ ret i64 %val64_signed
+}

-; match a zero-extending load volatile 16-bit -> 64-bit.
-; This uses the fact that ldrb w0, [x0] will zero out the high 32-bits
-; of x0 so it's identical to load volatileing to 32-bits.
- %val16_zext64 = load volatile i16, i16* @var_16bit
+define i64 @ld_u16_64() {
+; CHECK-LABEL: ld_u16_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_16bit
+; CHECK-NEXT: ldrh w0, [x8, :lo12:var_16bit]
+; CHECK-NEXT: ret
+ %val16_zext64 = load i16, i16* @var_16bit
 %val64_unsigned = zext i16 %val16_zext64 to i64
- store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
+ ret i64 %val64_unsigned
+}

-; truncating store volatile 32-bits to 16-bits
- %val32 = load volatile i32, i32* @var_32bit
+define i16 @ld_a16_16() {
+; CHECK-LABEL: ld_a16_16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_16bit
+; CHECK-NEXT: ldrh w8, [x8, :lo12:var_16bit]
+; CHECK-NEXT: add w0, w8, #1
+; CHECK-NEXT: ret
+ %val16_anyext = load i16, i16* @var_16bit
+ %newval16 = add i16 %val16_anyext, 1
+ ret i16 %newval16
+}
+
+define void @st_i32_16(i32 %val32) {
+; CHECK-LABEL: st_i32_16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_16bit
+; CHECK-NEXT: strh w0, [x8, :lo12:var_16bit]
+; CHECK-NEXT: ret
 %val16_trunc32 = trunc i32 %val32 to i16
- store volatile i16 %val16_trunc32, i16* @var_16bit
-; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
+ store i16 %val16_trunc32, i16* @var_16bit
+ ret void
+}

-; truncating store volatile 64-bits to 16-bits
- %val64 = load volatile i64, i64* @var_64bit
+define void @st_i64_16(i64 %val64) {
+; CHECK-LABEL: st_i64_16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_16bit
+; CHECK-NEXT: strh w0, [x8, :lo12:var_16bit]
+; CHECK-NEXT: ret
 %val16_trunc64 = trunc i64 %val64 to i16
- store volatile i16 %val16_trunc64, i16* @var_16bit
-; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_16bit]
-
+ store i16 %val16_trunc64, i16* @var_16bit
 ret void
}

-define dso_local void @ldst_32bit() {
-; CHECK-LABEL: ldst_32bit:
-
-; Straight 32-bit load/store
- %val32_noext = load volatile i32, i32* @var_32bit
- store volatile i32 %val32_noext, i32* @var_32bit
-; CHECK: adrp {{x[0-9]+}}, var_32bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
-; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
-
-; Zero-extension to 64-bits
- %val32_zext = load volatile i32, i32* @var_32bit
- %val64_unsigned = zext i32 %val32_zext to i64
- store volatile i64 %val64_unsigned, i64* @var_64bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
-
-; Sign-extension to 64-bits
- %val32_sext = load volatile i32, i32* @var_32bit
- %val64_signed = sext i32 %val32_sext to i64
- store volatile i64 %val64_signed, i64* @var_64bit
-; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
-; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
-
-; Truncation from 64-bits
- %val64_trunc = load volatile i64, i64* @var_64bit
- %val32_trunc = trunc i64 %val64_trunc to i32
- store volatile i32 %val32_trunc, i32* @var_32bit
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_64bit]
-; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_32bit]
+define i64 @ld_s32_64() {
+; CHECK-LABEL: ld_s32_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_32bit
+; CHECK-NEXT: ldrsw x0, [x8, :lo12:var_32bit]
+; CHECK-NEXT: ret
+ %val32_sext64 = load i32, i32* @var_32bit
+ %val64_signed = sext i32 %val32_sext64 to i64
+ ret i64 %val64_signed
+}
+
+define i64 @ld_u32_64() {
+; CHECK-LABEL: ld_u32_64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_32bit
+; CHECK-NEXT: ldr w0, [x8, :lo12:var_32bit]
+; CHECK-NEXT: ret
+ %val32_zext64 = load i32, i32* @var_32bit
+ %val64_unsigned = zext i32 %val32_zext64 to i64
+ ret i64 %val64_unsigned
+}
+
+define i32 @ld_a32_32() {
+; CHECK-LABEL: ld_a32_32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_32bit
+; CHECK-NEXT: ldr w8, [x8, :lo12:var_32bit]
+; CHECK-NEXT: add w0, w8, #1
+; CHECK-NEXT: ret
+ %val32_anyext = load i32, i32* @var_32bit
+ %newval32 = add i32 %val32_anyext, 1
+ ret i32 %newval32
+}
+
+define void @st_i64_32(i64 %val64) {
+; CHECK-LABEL: st_i64_32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, var_32bit
+; CHECK-NEXT: str w0, [x8, :lo12:var_32bit]
+; CHECK-NEXT: ret
+ %val32_trunc64 = trunc i64 %val64 to i32
+ store i32 %val32_trunc64, i32* @var_32bit
 ret void
}
+
 @arr8 = dso_local global i8* null
 @arr16 = dso_local global i16* null
 @arr32 = dso_local global i32* null
@@ -161,96 +224,120 @@ define dso_local void @ldst_32bit() {
 ; stores. Since all forms use the same Operand it's only necessary to
 ; check the various access-sizes involved.

-define dso_local void @ldst_complex_offsets() {
-; CHECK: ldst_complex_offsets
- %arr8_addr = load volatile i8*, i8** @arr8
-; CHECK: adrp {{x[0-9]+}}, arr8
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr8]
-
+define i8 @ld_i8_1(i8* %arr8_addr) {
+; CHECK-LABEL: ld_i8_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldrb w0, [x0, #1]
+; CHECK-NEXT: ret
 %arr8_sub1_addr = getelementptr i8, i8* %arr8_addr, i64 1
 %arr8_sub1 = load volatile i8, i8* %arr8_sub1_addr
- store volatile i8 %arr8_sub1, i8* @var_8bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #1]
+ ret i8 %arr8_sub1
+}

+define i8 @ld_i8_4095(i8* %arr8_addr) {
+; CHECK-LABEL: ld_i8_4095:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldrb w0, [x0, #4095]
+; CHECK-NEXT: ret
 %arr8_sub4095_addr = getelementptr i8, i8* %arr8_addr, i64 4095
 %arr8_sub4095 = load volatile i8, i8* %arr8_sub4095_addr
- store volatile i8 %arr8_sub4095, i8* @var_8bit
-; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, #4095]
-
-
- %arr16_addr = load volatile i16*, i16** @arr16
-; CHECK: adrp {{x[0-9]+}}, arr16
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr16]
+ ret i8 %arr8_sub4095
+}

+define i16 @ld_i16_1(i16* %arr16_addr) {
+; CHECK-LABEL: ld_i16_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldrh w0, [x0, #2]
+; CHECK-NEXT: ret
 %arr16_sub1_addr = getelementptr i16, i16* %arr16_addr, i64 1
 %arr16_sub1 = load volatile i16, i16* %arr16_sub1_addr
- store volatile i16 %arr16_sub1, i16* @var_16bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #2]
+ ret i16 %arr16_sub1
+}

+define i16 @ld_i16_4095(i16* %arr16_addr) {
+; CHECK-LABEL: ld_i16_4095:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldrh w0, [x0, #8190]
+; CHECK-NEXT: ret
 %arr16_sub4095_addr = getelementptr i16, i16* %arr16_addr, i64 4095
 %arr16_sub4095 = load volatile i16, i16* %arr16_sub4095_addr
- store volatile i16 %arr16_sub4095, i16* @var_16bit
-; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, #8190]
-
-
- %arr32_addr = load volatile i32*, i32** @arr32
-; CHECK: adrp {{x[0-9]+}}, arr32
-; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr32]
+ ret i16 %arr16_sub4095
+}

+define i32 @ld_i32_1(i32* %arr32_addr) {
+; CHECK-LABEL: ld_i32_1:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr w0, [x0, #4]
+; CHECK-NEXT: ret
 %arr32_sub1_addr = getelementptr i32, i32* %arr32_addr, i64 1
 %arr32_sub1 = load volatile i32, i32* %arr32_sub1_addr
- store volatile i32 %arr32_sub1, i32* @var_32bit
-; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #4]
+ ret i32 %arr32_sub1
+}

+define i32 @ld_i32_4095(i32* %arr32_addr) {
+; CHECK-LABEL: ld_i32_4095:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldr w0, [x0, #16380]
+; CHECK-NEXT: ret
%arr32_sub4095_addr = getelementptr i32, i32* %arr32_addr, i64 4095 %arr32_sub4095 = load volatile i32, i32* %arr32_sub4095_addr - store volatile i32 %arr32_sub4095, i32* @var_32bit -; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, #16380] - - - %arr64_addr = load volatile i64*, i64** @arr64 -; CHECK: adrp {{x[0-9]+}}, arr64 -; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:arr64] + ret i32 %arr32_sub4095 +} +define i64 @ld_i64_1(i64* %arr64_addr) { +; CHECK-LABEL: ld_i64_1: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr x0, [x0, #8] +; CHECK-NEXT: ret %arr64_sub1_addr = getelementptr i64, i64* %arr64_addr, i64 1 %arr64_sub1 = load volatile i64, i64* %arr64_sub1_addr - store volatile i64 %arr64_sub1, i64* @var_64bit -; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #8] + ret i64 %arr64_sub1 +} +define i64 @ld_i64_4095(i64* %arr64_addr) { +; CHECK-LABEL: ld_i64_4095: +; CHECK: // %bb.0: +; CHECK-NEXT: ldr x0, [x0, #32760] +; CHECK-NEXT: ret %arr64_sub4095_addr = getelementptr i64, i64* %arr64_addr, i64 4095 %arr64_sub4095 = load volatile i64, i64* %arr64_sub4095_addr - store volatile i64 %arr64_sub4095, i64* @var_64bit -; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, #32760] - - ret void + ret i64 %arr64_sub4095 } define dso_local void @ldst_float() { -; CHECK-LABEL: ldst_float: - - %valfp = load volatile float, float* @var_float -; CHECK: adrp {{x[0-9]+}}, var_float -; CHECK: ldr {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_float] -; CHECK-NOFP-NOT: ldr {{s[0-9]+}}, - +; CHECK-FP-LABEL: ldst_float: +; CHECK-FP: // %bb.0: +; CHECK-FP-NEXT: adrp x8, var_float +; CHECK-FP-NEXT: ldr s0, [x8, :lo12:var_float] +; CHECK-FP-NEXT: str s0, [x8, :lo12:var_float] +; CHECK-FP-NEXT: ret +; +; CHECK-NOFP-LABEL: ldst_float: +; CHECK-NOFP: // %bb.0: +; CHECK-NOFP-NEXT: adrp x8, var_float +; CHECK-NOFP-NEXT: ldr w9, [x8, :lo12:var_float] +; CHECK-NOFP-NEXT: str w9, [x8, :lo12:var_float] +; CHECK-NOFP-NEXT: ret + %valfp = load volatile float, float* @var_float store volatile float %valfp, float* @var_float -; CHECK: str {{s[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_float] -; CHECK-NOFP-NOT: str {{s[0-9]+}}, - - ret void + ret void } define dso_local void @ldst_double() { -; CHECK-LABEL: ldst_double: - - %valfp = load volatile double, double* @var_double -; CHECK: adrp {{x[0-9]+}}, var_double -; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_double] -; CHECK-NOFP-NOT: ldr {{d[0-9]+}}, - +; CHECK-FP-LABEL: ldst_double: +; CHECK-FP: // %bb.0: +; CHECK-FP-NEXT: adrp x8, var_double +; CHECK-FP-NEXT: ldr d0, [x8, :lo12:var_double] +; CHECK-FP-NEXT: str d0, [x8, :lo12:var_double] +; CHECK-FP-NEXT: ret +; +; CHECK-NOFP-LABEL: ldst_double: +; CHECK-NOFP: // %bb.0: +; CHECK-NOFP-NEXT: adrp x8, var_double +; CHECK-NOFP-NEXT: ldr x9, [x8, :lo12:var_double] +; CHECK-NOFP-NEXT: str x9, [x8, :lo12:var_double] +; CHECK-NOFP-NEXT: ret + %valfp = load volatile double, double* @var_double store volatile double %valfp, double* @var_double -; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}, {{#?}}:lo12:var_double] -; CHECK-NOFP-NOT: str {{d[0-9]+}}, - - ret void + ret void }
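
Note on regenerating the checks: the new version of the test carries autogenerated CHECK lines, so future changes to the test can refresh the assertions mechanically rather than by hand. A typical invocation from the root of an LLVM checkout would look something like the following (the build directory path is illustrative; --llc-binary simply points the script at the llc that should produce the output):

    llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/AArch64/ldst-unsignedimm.ll

The script runs each RUN line in the file, captures the assembly emitted for every function, and rewrites the CHECK/CHECK-NEXT blocks: output common to both RUN lines lands under the shared CHECK prefix, while the FP/NOFP differences land under CHECK-FP and CHECK-NOFP.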