From 54bd6c840e37bd738253d6ee5e7c2c571dd66860 Mon Sep 17 00:00:00 2001 From: Roman Lebedev Date: Wed, 5 Jun 2019 14:08:01 +0000 Subject: [PATCH] UpdateTestChecks: hexagon support Summary: These tests are being affected by an upcoming patch, so having an understandable (autogenerated) diff is helpful. This target, again, prefers `-march`: ``` llvm/test/CodeGen/Hexagon$ grep -r triple | wc -l 467 llvm/test/CodeGen/Hexagon$ grep -r march | wc -l 1167 ``` Reviewers: RKSimon, kparzysz Reviewed By: kparzysz Subscribers: xbolva00, llvm-commits Tags: #llvm Differential Revision: https://reviews.llvm.org/D62867 llvm-svn: 362605 --- llvm/test/CodeGen/Hexagon/isel-prefer.ll | 79 +++++++++++++++++++++++++++----- llvm/test/CodeGen/Hexagon/ntstbit.ll | 35 +++++++++++++- llvm/test/CodeGen/Hexagon/tstbit.ll | 11 ++++- llvm/utils/UpdateTestChecks/asm.py | 19 ++++++++ 4 files changed, 130 insertions(+), 14 deletions(-) diff --git a/llvm/test/CodeGen/Hexagon/isel-prefer.ll b/llvm/test/CodeGen/Hexagon/isel-prefer.ll index 4cef003..1b69f56 100644 --- a/llvm/test/CodeGen/Hexagon/isel-prefer.ll +++ b/llvm/test/CodeGen/Hexagon/isel-prefer.ll @@ -1,11 +1,24 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -march=hexagon < %s | FileCheck %s @data1 = external global [2 x [31 x i8]], align 8 @data2 = external global [2 x [91 x i8]], align 8 -; CHECK-LABEL: Prefer_M4_or_andn: -; CHECK: r2 |= and(r0,~r1) define i32 @Prefer_M4_or_andn(i32 %a0, i32 %a1, i32 %a2) #0 { +; CHECK-LABEL: Prefer_M4_or_andn: +; CHECK: // %bb.0: // %b3 +; CHECK-NEXT: { +; CHECK-NEXT: r2 = asl(r2,#5) +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: r2 |= and(r0,~r1) +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: r0 = r2 +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: jumpr r31 +; CHECK-NEXT: } b3: %v4 = xor i32 %a1, -1 %v5 = shl i32 %a2, 5 @@ -14,27 +27,51 @@ b3: ret i32 %v7 } -; CHECK-LABEL: Prefer_M4_mpyri_addi: -; CHECK: add(##data1,mpyi(r0,#31)) define i32 @Prefer_M4_mpyri_addi(i32 %a0) #0 { +; CHECK-LABEL: Prefer_M4_mpyri_addi: +; CHECK: // %bb.0: // %b1 +; CHECK-NEXT: { +; CHECK-NEXT: r0 = add(##data1,mpyi(r0,#31)) +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: jumpr r31 +; CHECK-NEXT: } b1: %v2 = getelementptr inbounds [2 x [31 x i8]], [2 x [31 x i8]]* @data1, i32 0, i32 %a0 %v3 = ptrtoint [31 x i8]* %v2 to i32 ret i32 %v3 } -; CHECK-LABEL: Prefer_M4_mpyrr_addi: -; CHECK: add(##data2,mpyi(r0,r1)) define i32 @Prefer_M4_mpyrr_addi(i32 %a0) #0 { +; CHECK-LABEL: Prefer_M4_mpyrr_addi: +; CHECK: // %bb.0: // %b1 +; CHECK-NEXT: { +; CHECK-NEXT: r1 = #91 +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: r0 = add(##data2,mpyi(r0,r1)) +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: jumpr r31 +; CHECK-NEXT: } b1: %v2 = getelementptr inbounds [2 x [91 x i8]], [2 x [91 x i8]]* @data2, i32 0, i32 %a0 %v3 = ptrtoint [91 x i8]* %v2 to i32 ret i32 %v3 } -; CHECK-LABEL: Prefer_S2_tstbit_r: -; CHECK: p0 = tstbit(r0,r1) define i32 @Prefer_S2_tstbit_r(i32 %a0, i32 %a1) #0 { +; CHECK-LABEL: Prefer_S2_tstbit_r: +; CHECK: // %bb.0: // %b2 +; CHECK-NEXT: { +; CHECK-NEXT: p0 = tstbit(r0,r1) +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: r0 = mux(p0,#1,#0) +; CHECK-NEXT: } +; CHECK-NEXT: { +; CHECK-NEXT: jumpr r31 +; CHECK-NEXT: } b2: %v3 = shl i32 1, %a1 %v4 = and i32 %a0, %v3 @@ -43,9 +80,18 @@ b2: ret i32 %v6 } -; CHECK-LABEL: Prefer_S2_ntstbit_r: -; CHECK: p0 = !tstbit(r0,r1) define i32 @Prefer_S2_ntstbit_r(i32 %a0, i32 %a1) #0 { +; CHECK-LABEL: Prefer_S2_ntstbit_r: +; CHECK: // %bb.0: // %b2 +; CHECK-NEXT: { +; 
CHECK-NEXT: p0 = !tstbit(r0,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = mux(p0,#1,#0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
 b2:
   %v3 = shl i32 1, %a1
   %v4 = and i32 %a0, %v3
@@ -54,9 +100,18 @@ b2:
   ret i32 %v6
 }

-; CHECK-LABEL: Prefer_L2_loadrub_io:
-; CHECK: memub(r0+#65)
 define i64 @Prefer_L2_loadrub_io(i8* %a0) #0 {
+; CHECK-LABEL: Prefer_L2_loadrub_io:
+; CHECK: // %bb.0: // %b1
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = memub(r0+#65)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r1:0 = combine(#0,r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
 b1:
   %v2 = getelementptr i8, i8* %a0, i32 65
   %v3 = load i8, i8* %v2
diff --git a/llvm/test/CodeGen/Hexagon/ntstbit.ll b/llvm/test/CodeGen/Hexagon/ntstbit.ll
index 8c6d77e..2b8526d 100644
--- a/llvm/test/CodeGen/Hexagon/ntstbit.ll
+++ b/llvm/test/CodeGen/Hexagon/ntstbit.ll
@@ -1,8 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: !tstbit

 ; Function Attrs: nounwind
 define i32 @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
+; CHECK-LABEL: f0:
+; CHECK: // %bb.0: // %b0
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = !tstbit(r1,r2)
+; CHECK-NEXT: r17:16 = combine(r0,r1)
+; CHECK-NEXT: memd(r29+#-16) = r17:16
+; CHECK-NEXT: allocframe(#8)
+; CHECK-NEXT: } // 8-byte Folded Spill
+; CHECK-NEXT: {
+; CHECK-NEXT: if (p0) jump:nt .LBB0_2
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.1: // %b1
+; CHECK-NEXT: {
+; CHECK-NEXT: call f1
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jump .LBB0_3
+; CHECK-NEXT: }
+; CHECK-NEXT: .LBB0_2: // %b2
+; CHECK-NEXT: {
+; CHECK-NEXT: call f2
+; CHECK-NEXT: }
+; CHECK-NEXT: .LBB0_3: // %b3
+; CHECK-NEXT: {
+; CHECK-NEXT: call f3
+; CHECK-NEXT: r1 = add(r16,#2)
+; CHECK-NEXT: r0 = r17
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = #0
+; CHECK-NEXT: r17:16 = memd(r29+#0)
+; CHECK-NEXT: dealloc_return
+; CHECK-NEXT: } // 8-byte Folded Reload
 b0:
   %v0 = shl i32 1, %a2
   %v1 = and i32 %v0, %a1
diff --git a/llvm/test/CodeGen/Hexagon/tstbit.ll b/llvm/test/CodeGen/Hexagon/tstbit.ll
index f28f031..ac17990 100644
--- a/llvm/test/CodeGen/Hexagon/tstbit.ll
+++ b/llvm/test/CodeGen/Hexagon/tstbit.ll
@@ -1,8 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: tstbit

 ; Function Attrs: nounwind readnone
 define i32 @f0(i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: f0:
+; CHECK: // %bb.0: // %b0
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = tstbit(r0,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = mux(p0,#1,#0)
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
 b0:
   %v0 = shl i32 1, %a1
   %v1 = and i32 %v0, %a0
diff --git a/llvm/utils/UpdateTestChecks/asm.py b/llvm/utils/UpdateTestChecks/asm.py
index 7fb93fa..a27cd04 100644
--- a/llvm/utils/UpdateTestChecks/asm.py
+++ b/llvm/utils/UpdateTestChecks/asm.py
@@ -42,6 +42,13 @@ ASM_FUNCTION_AMDGPU_RE = re.compile(
     r'.Lfunc_end[0-9]+:\n',
     flags=(re.M | re.S))

+ASM_FUNCTION_HEXAGON_RE = re.compile(
+    r'^_?(?P<func>[^:]+):[ \t]*//[ \t]*@(?P=func)\n[^:]*?'
+    r'(?P<body>.*?)\n' # (body of the function)
+    # This list is incomplete
+    r'.Lfunc_end[0-9]+:\n',
+    flags=(re.M | re.S))
+
 ASM_FUNCTION_MIPS_RE = re.compile(
     r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?'
# f: (name of func) r'(?:^[ \t]+\.(frame|f?mask|set).*?\n)+' # Mips+LLVM standard asm prologue @@ -161,6 +168,16 @@ def scrub_asm_arm_eabi(asm, args): asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm) return asm +def scrub_asm_hexagon(asm, args): + # Scrub runs of whitespace out of the assembly, but leave the leading + # whitespace in place. + asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm) + # Expand the tabs used for indentation. + asm = string.expandtabs(asm, 2) + # Strip trailing whitespace. + asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm) + return asm + def scrub_asm_powerpc(asm, args): # Scrub runs of whitespace out of the assembly, but leave the leading # whitespace in place. @@ -239,6 +256,7 @@ def get_triple_from_march(march): 'r600': 'r600', 'mips': 'mips', 'sparc': 'sparc', + 'hexagon': 'hexagon', } for prefix, triple in triples.items(): if march.startswith(prefix): @@ -254,6 +272,7 @@ def build_function_body_dictionary_for_triple(args, raw_tool_output, triple, pre 'i386': (scrub_asm_x86, ASM_FUNCTION_X86_RE), 'arm64-eabi': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE), 'aarch64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE), + 'hexagon': (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE), 'r600': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE), 'amdgcn': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE), 'arm-eabi': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE), -- 2.7.4
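Illustration (not part of the patch): the sketch below shows how the new ASM_FUNCTION_HEXAGON_RE is expected to carve a function name and body out of `llc -march=hexagon` output. The regex is the one added above (inline comments elided); the assembly fragment is hand-written for illustration and is not actual llc output.

```
import re

# Same pattern as the ASM_FUNCTION_HEXAGON_RE added by this patch.
ASM_FUNCTION_HEXAGON_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*//[ \t]*@(?P=func)\n[^:]*?'
    r'(?P<body>.*?)\n'
    r'.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))

# Hand-written stand-in for llc -march=hexagon output (illustration only).
sample = (
    'f0:                                     // @f0\n'
    '// %bb.0:\n'
    '\t{\n'
    '\t p0 = tstbit(r0,r1)\n'
    '\t}\n'
    '\t{\n'
    '\t r0 = mux(p0,#1,#0)\n'
    '\t jumpr r31\n'
    '\t}\n'
    '.Lfunc_end0:\n')

m = ASM_FUNCTION_HEXAGON_RE.search(sample)
print(m.group('func'))  # -> f0
# The captured body is what update_llc_test_checks.py hands to
# scrub_asm_hexagon(), which collapses whitespace runs, expands tabs to two
# spaces and strips trailing whitespace before the '; CHECK-NEXT:' lines
# seen in the tests above are emitted.
print(m.group('body'))
```

With the 'hexagon' entries added to get_triple_from_march() and build_function_body_dictionary_for_triple(), a plain `llc -march=hexagon` RUN line (the form these tests prefer) is enough for the script to select this regex and scrubber.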