+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=hexagon < %s | FileCheck %s
@data1 = external global [2 x [31 x i8]], align 8
@data2 = external global [2 x [91 x i8]], align 8
-; CHECK-LABEL: Prefer_M4_or_andn:
-; CHECK: r2 |= and(r0,~r1)
define i32 @Prefer_M4_or_andn(i32 %a0, i32 %a1, i32 %a2) #0 {
+; CHECK-LABEL: Prefer_M4_or_andn:
+; CHECK: // %bb.0: // %b3
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 = asl(r2,#5)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r2 |= and(r0,~r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = r2
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
b3:
%v4 = xor i32 %a1, -1
%v5 = shl i32 %a2, 5
ret i32 %v7
}
-; CHECK-LABEL: Prefer_M4_mpyri_addi:
-; CHECK: add(##data1,mpyi(r0,#31))
define i32 @Prefer_M4_mpyri_addi(i32 %a0) #0 {
+; CHECK-LABEL: Prefer_M4_mpyri_addi:
+; CHECK: // %bb.0: // %b1
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = add(##data1,mpyi(r0,#31))
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
b1:
; The GEP selects row %a0 of @data1, whose rows are 31 bytes wide, so the
; result is data1 + 31*%a0.  The CHECK lines above assert this is selected
; as a single add(##data1,mpyi(r0,#31)) with the immediate multiplier.
%v2 = getelementptr inbounds [2 x [31 x i8]], [2 x [31 x i8]]* @data1, i32 0, i32 %a0
%v3 = ptrtoint [31 x i8]* %v2 to i32
ret i32 %v3
}
-; CHECK-LABEL: Prefer_M4_mpyrr_addi:
-; CHECK: add(##data2,mpyi(r0,r1))
define i32 @Prefer_M4_mpyrr_addi(i32 %a0) #0 {
+; CHECK-LABEL: Prefer_M4_mpyrr_addi:
+; CHECK: // %bb.0: // %b1
+; CHECK-NEXT: {
+; CHECK-NEXT: r1 = #91
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = add(##data2,mpyi(r0,r1))
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
b1:
; Rows of @data2 are 91 bytes wide, so the result is data2 + 91*%a0.  The
; CHECK lines above assert the multiplier is materialized in a register
; (r1 = #91) and the address formed by add(##data2,mpyi(r0,r1)).
%v2 = getelementptr inbounds [2 x [91 x i8]], [2 x [91 x i8]]* @data2, i32 0, i32 %a0
%v3 = ptrtoint [91 x i8]* %v2 to i32
ret i32 %v3
}
-; CHECK-LABEL: Prefer_S2_tstbit_r:
-; CHECK: p0 = tstbit(r0,r1)
define i32 @Prefer_S2_tstbit_r(i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: Prefer_S2_tstbit_r:
+; CHECK: // %bb.0: // %b2
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = tstbit(r0,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = mux(p0,#1,#0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
b2:
%v3 = shl i32 1, %a1
%v4 = and i32 %a0, %v3
ret i32 %v6
}
-; CHECK-LABEL: Prefer_S2_ntstbit_r:
-; CHECK: p0 = !tstbit(r0,r1)
define i32 @Prefer_S2_ntstbit_r(i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: Prefer_S2_ntstbit_r:
+; CHECK: // %bb.0: // %b2
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = !tstbit(r0,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = mux(p0,#1,#0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
b2:
%v3 = shl i32 1, %a1
%v4 = and i32 %a0, %v3
ret i32 %v6
}
-; CHECK-LABEL: Prefer_L2_loadrub_io:
-; CHECK: memub(r0+#65)
define i64 @Prefer_L2_loadrub_io(i8* %a0) #0 {
+; CHECK-LABEL: Prefer_L2_loadrub_io:
+; CHECK: // %bb.0: // %b1
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = memub(r0+#65)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r1:0 = combine(#0,r0)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
b1:
%v2 = getelementptr i8, i8* %a0, i32 65
%v3 = load i8, i8* %v2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: !tstbit
; Function Attrs: nounwind
define i32 @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
+; CHECK-LABEL: f0:
+; CHECK: // %bb.0: // %b0
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = !tstbit(r1,r2)
+; CHECK-NEXT: r17:16 = combine(r0,r1)
+; CHECK-NEXT: memd(r29+#-16) = r17:16
+; CHECK-NEXT: allocframe(#8)
+; CHECK-NEXT: } // 8-byte Folded Spill
+; CHECK-NEXT: {
+; CHECK-NEXT: if (p0) jump:nt .LBB0_2
+; CHECK-NEXT: }
+; CHECK-NEXT: // %bb.1: // %b1
+; CHECK-NEXT: {
+; CHECK-NEXT: call f1
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: jump .LBB0_3
+; CHECK-NEXT: }
+; CHECK-NEXT: .LBB0_2: // %b2
+; CHECK-NEXT: {
+; CHECK-NEXT: call f2
+; CHECK-NEXT: }
+; CHECK-NEXT: .LBB0_3: // %b3
+; CHECK-NEXT: {
+; CHECK-NEXT: call f3
+; CHECK-NEXT: r1 = add(r16,#2)
+; CHECK-NEXT: r0 = r17
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = #0
+; CHECK-NEXT: r17:16 = memd(r29+#0)
+; CHECK-NEXT: dealloc_return
+; CHECK-NEXT: } // 8-byte Folded Reload
b0:
%v0 = shl i32 1, %a2
%v1 = and i32 %v0, %a1
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=hexagon < %s | FileCheck %s
-; CHECK: tstbit
; Function Attrs: nounwind readnone
define i32 @f0(i32 %a0, i32 %a1) #0 {
+; CHECK-LABEL: f0:
+; CHECK: // %bb.0: // %b0
+; CHECK-NEXT: {
+; CHECK-NEXT: p0 = tstbit(r0,r1)
+; CHECK-NEXT: }
+; CHECK-NEXT: {
+; CHECK-NEXT: r0 = mux(p0,#1,#0)
+; CHECK-NEXT: jumpr r31
+; CHECK-NEXT: }
b0:
%v0 = shl i32 1, %a1
%v1 = and i32 %v0, %a0
r'.Lfunc_end[0-9]+:\n',
flags=(re.M | re.S))
# Matches one complete Hexagon function in llc output, from the
# "name:  // @name" header down to the ".Lfunc_endN:" label that llc
# emits after every function body.
ASM_FUNCTION_HEXAGON_RE = re.compile(
    r'^_?(?P<func>[^:]+):[ \t]*//[ \t]*@(?P=func)\n[^:]*?'  # f0: // @f0
    r'(?P<body>.*?)\n'                                      # (body of the function)
    # Escape the dot so only the literal ".Lfunc_endN:" label terminates
    # the body, not an arbitrary character followed by "Lfunc_end".
    r'\.Lfunc_end[0-9]+:\n',
    flags=(re.M | re.S))
+
ASM_FUNCTION_MIPS_RE = re.compile(
r'^_?(?P<func>[^:]+):[ \t]*#+[ \t]*@(?P=func)\n[^:]*?' # f: (name of func)
r'(?:^[ \t]+\.(frame|f?mask|set).*?\n)+' # Mips+LLVM standard asm prologue
asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
return asm
def scrub_asm_hexagon(asm, args):
  # Normalize llc's Hexagon assembly output before CHECK-line generation.
  # `args` is accepted for signature parity with the other scrub_asm_*
  # callbacks and is unused here.
  #
  # Scrub runs of whitespace out of the assembly, but leave the leading
  # whitespace in place.
  asm = common.SCRUB_WHITESPACE_RE.sub(r' ', asm)
  # Expand the tabs used for indentation.  Use the str method rather than
  # string.expandtabs(asm, 2): the module-level helper exists only on
  # Python 2, while str.expandtabs works on both Python 2 and 3.
  asm = asm.expandtabs(2)
  # Strip trailing whitespace.
  asm = common.SCRUB_TRAILING_WHITESPACE_RE.sub(r'', asm)
  return asm
+
def scrub_asm_powerpc(asm, args):
# Scrub runs of whitespace out of the assembly, but leave the leading
# whitespace in place.
'r600': 'r600',
'mips': 'mips',
'sparc': 'sparc',
+ 'hexagon': 'hexagon',
}
for prefix, triple in triples.items():
if march.startswith(prefix):
'i386': (scrub_asm_x86, ASM_FUNCTION_X86_RE),
'arm64-eabi': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
'aarch64': (scrub_asm_arm_eabi, ASM_FUNCTION_AARCH64_RE),
+ 'hexagon': (scrub_asm_hexagon, ASM_FUNCTION_HEXAGON_RE),
'r600': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
'amdgcn': (scrub_asm_amdgpu, ASM_FUNCTION_AMDGPU_RE),
'arm-eabi': (scrub_asm_arm_eabi, ASM_FUNCTION_ARM_RE),