From 9fdf21f3d07d5efafec4334b1b4d200bc7811c05 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Wed, 21 Dec 2022 12:08:07 -0800
Subject: [PATCH] [RISCV] Add test cases for i8/i16 abs followed by zext.

The andi, zext.h and slli+srli shift pairs at the end of the generated
output are unnecessary if the input is sign extended.
---
 llvm/test/CodeGen/RISCV/iabs.ll | 72 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/llvm/test/CodeGen/RISCV/iabs.ll b/llvm/test/CodeGen/RISCV/iabs.ll
index 9ab9290..727d6e7 100644
--- a/llvm/test/CodeGen/RISCV/iabs.ll
+++ b/llvm/test/CodeGen/RISCV/iabs.ll
@@ -509,3 +509,75 @@ define i64 @zext_abs32(i32 %x) {
   %zext = zext i32 %abs to i64
   ret i64 %zext
 }
+
+define signext i32 @zext_abs8(i8 signext %x) {
+; RV32I-LABEL: zext_abs8:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a1, a0, 7
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    andi a0, a0, 255
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: zext_abs8:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    andi a0, a0, 255
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: zext_abs8:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a1, a0, 7
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    andi a0, a0, 255
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: zext_abs8:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    andi a0, a0, 255
+; RV64ZBB-NEXT:    ret
+  %a = call i8 @llvm.abs.i8(i8 %x, i1 false)
+  %b = zext i8 %a to i32
+  ret i32 %b
+}
+
+define signext i32 @zext_abs16(i16 signext %x) {
+; RV32I-LABEL: zext_abs16:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    srai a1, a0, 15
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    sub a0, a0, a1
+; RV32I-NEXT:    slli a0, a0, 16
+; RV32I-NEXT:    srli a0, a0, 16
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-LABEL: zext_abs16:
+; RV32ZBB:       # %bb.0:
+; RV32ZBB-NEXT:    neg a1, a0
+; RV32ZBB-NEXT:    max a0, a0, a1
+; RV32ZBB-NEXT:    zext.h a0, a0
+; RV32ZBB-NEXT:    ret
+;
+; RV64I-LABEL: zext_abs16:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    srai a1, a0, 15
+; RV64I-NEXT:    xor a0, a0, a1
+; RV64I-NEXT:    subw a0, a0, a1
+; RV64I-NEXT:    slli a0, a0, 48
+; RV64I-NEXT:    srli a0, a0, 48
+; RV64I-NEXT:    ret
+;
+; RV64ZBB-LABEL: zext_abs16:
+; RV64ZBB:       # %bb.0:
+; RV64ZBB-NEXT:    neg a1, a0
+; RV64ZBB-NEXT:    max a0, a0, a1
+; RV64ZBB-NEXT:    zext.h a0, a0
+; RV64ZBB-NEXT:    ret
+  %a = call i16 @llvm.abs.i16(i16 %x, i1 false)
+  %b = zext i16 %a to i32
+  ret i32 %b
+}
--
2.7.4
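
Note on the commit message's claim: the sketch below is not part of the patch or of LLVM; it is a minimal, hypothetical C program (the name abs_i8_expansion is made up) that mirrors the RV32I srai/xor/sub expansion of abs and checks that, for every sign-extended i8 input, the full-width result already lies in [0, 128]. That is why the trailing andi a0, a0, 255 is redundant, and the same range argument covers the zext.h and slli+srli pairs in the i16 case.

/* Minimal sketch, assuming arithmetic right shift of negative signed
 * values (implementation-defined in C, but it matches RISC-V srai on
 * mainstream compilers). */
#include <assert.h>
#include <stdint.h>

/* Mirrors: srai a1, a0, 7 ; xor a0, a0, a1 ; sub a0, a0, a1 */
static uint32_t abs_i8_expansion(int32_t x_sext)
{
    int32_t sign = x_sext >> 7;                /* 0 or -1 when x is a sign-extended i8 */
    return (uint32_t)((x_sext ^ sign) - sign); /* |x|, computed in full register width */
}

int main(void)
{
    for (int32_t v = -128; v <= 127; ++v) {    /* every sign-extended i8 value */
        uint32_t a = abs_i8_expansion(v);
        assert(a <= 128);                      /* so `andi a0, a0, 255` changes nothing */
        assert(a == (a & 0xff));
    }
    return 0;
}

Under those assumptions the program runs to completion with no assertion failures, including the v = -128 case, where abs with is_int_min_poison=false wraps to 0x80 and zero-extends to 128.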