From 599421ae36c332474c5034b4baaab59833e76418 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 15 Jul 2023 11:19:18 -0700
Subject: [PATCH] [RISCV] Use unsigned instead of signed types for Zk* and Zb*
 builtins.

Unsigned is a better representation for bit manipulation and cryptography.
The only exception is that the clz and ctz intrinsics return a signed int,
which matches the target-independent clz and ctz builtins.

This is consistent with the current scalar crypto proposal
https://github.com/riscv-non-isa/riscv-c-api-doc/pull/44

Reviewed By: VincentWu

Differential Revision: https://reviews.llvm.org/D154616
---
 clang/include/clang/Basic/BuiltinsRISCV.def      | 74 +++++++++++-----------
 .../CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c  |  8 ++-
 .../CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c  |  6 +-
 .../RISCV/rvb-intrinsics/riscv64-zbkb-error.c    |  6 +-
 .../CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c  |  6 +-
 .../CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c  |  6 +-
 .../CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c  |  6 +-
 .../CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c  |  6 +-
 .../CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c  | 22 ++++---
 .../CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c |  6 +-
 .../CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c  |  4 +-
 .../RISCV/rvk-intrinsics/riscv64-zknd-zkne.c     |  6 +-
 .../CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c  |  7 +-
 .../CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c  |  5 +-
 .../CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c  | 17 ++---
 .../CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c |  4 +-
 .../CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c  |  4 +-
 17 files changed, 108 insertions(+), 85 deletions(-)

diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def
index 98c8663..0e157a0 100644
--- a/clang/include/clang/Basic/BuiltinsRISCV.def
+++ b/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -32,58 +32,58 @@ TARGET_BUILTIN(__builtin_riscv_clmulr_32, "UiUiUi", "nc", "zbc,32bit")
 TARGET_BUILTIN(__builtin_riscv_clmulr_64, "UWiUWiUWi", "nc", "zbc,64bit")
 
 // Zbkx
-TARGET_BUILTIN(__builtin_riscv_xperm4_32, "iii", "nc", "zbkx,32bit")
-TARGET_BUILTIN(__builtin_riscv_xperm4_64, "WiWiWi", "nc", "zbkx,64bit")
-TARGET_BUILTIN(__builtin_riscv_xperm8_32, "iii", "nc", "zbkx,32bit")
-TARGET_BUILTIN(__builtin_riscv_xperm8_64, "WiWiWi", "nc", "zbkx,64bit")
+TARGET_BUILTIN(__builtin_riscv_xperm4_32, "UiUiUi", "nc", "zbkx,32bit")
+TARGET_BUILTIN(__builtin_riscv_xperm4_64, "UWiUWiUWi", "nc", "zbkx,64bit")
+TARGET_BUILTIN(__builtin_riscv_xperm8_32, "UiUiUi", "nc", "zbkx,32bit")
+TARGET_BUILTIN(__builtin_riscv_xperm8_64, "UWiUWiUWi", "nc", "zbkx,64bit")
 
 // Zbkb extension
-TARGET_BUILTIN(__builtin_riscv_brev8_32, "ii", "nc", "zbkb")
-TARGET_BUILTIN(__builtin_riscv_brev8_64, "WiWi", "nc", "zbkb,64bit")
-TARGET_BUILTIN(__builtin_riscv_zip_32, "ZiZi", "nc", "zbkb,32bit")
-TARGET_BUILTIN(__builtin_riscv_unzip_32, "ZiZi", "nc", "zbkb,32bit")
+TARGET_BUILTIN(__builtin_riscv_brev8_32, "UiUi", "nc", "zbkb")
+TARGET_BUILTIN(__builtin_riscv_brev8_64, "UWiUWi", "nc", "zbkb,64bit")
+TARGET_BUILTIN(__builtin_riscv_zip_32, "UiUi", "nc", "zbkb,32bit")
+TARGET_BUILTIN(__builtin_riscv_unzip_32, "UiUi", "nc", "zbkb,32bit")
 
 // Zknd extension
-TARGET_BUILTIN(__builtin_riscv_aes32dsi_32, "ZiZiZiIUi", "nc", "zknd,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes32dsmi_32, "ZiZiZiIUi", "nc", "zknd,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes64ds_64, "WiWiWi", "nc", "zknd,64bit")
-TARGET_BUILTIN(__builtin_riscv_aes64dsm_64, "WiWiWi", "nc", "zknd,64bit")
-TARGET_BUILTIN(__builtin_riscv_aes64im_64, "WiWi", "nc", "zknd,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes32dsi_32, "UiUiUiIUi", "nc", "zknd,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32dsmi_32, "UiUiUiIUi", "nc", "zknd,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes64ds_64, "UWiUWiUWi", "nc", "zknd,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64dsm_64, "UWiUWiUWi", "nc", "zknd,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64im_64, "UWiUWi", "nc", "zknd,64bit")
 
 // Zknd & zkne
-TARGET_BUILTIN(__builtin_riscv_aes64ks1i_64, "WiWiIUi", "nc", "zknd|zkne,64bit")
-TARGET_BUILTIN(__builtin_riscv_aes64ks2_64, "WiWiWi", "nc", "zknd|zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64ks1i_64, "UWiUWiIUi", "nc", "zknd|zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64ks2_64, "UWiUWiUWi", "nc", "zknd|zkne,64bit")
 
 // Zkne extension
-TARGET_BUILTIN(__builtin_riscv_aes32esi_32, "ZiZiZiIUi", "nc", "zkne,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes32esmi_32, "ZiZiZiIUi", "nc", "zkne,32bit")
-TARGET_BUILTIN(__builtin_riscv_aes64es_64, "WiWiWi", "nc", "zkne,64bit")
-TARGET_BUILTIN(__builtin_riscv_aes64esm_64, "WiWiWi", "nc", "zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes32esi_32, "UiUiUiIUi", "nc", "zkne,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes32esmi_32, "UiUiUiIUi", "nc", "zkne,32bit")
+TARGET_BUILTIN(__builtin_riscv_aes64es_64, "UWiUWiUWi", "nc", "zkne,64bit")
+TARGET_BUILTIN(__builtin_riscv_aes64esm_64, "UWiUWiUWi", "nc", "zkne,64bit")
 
 // Zknh extension
-TARGET_BUILTIN(__builtin_riscv_sha256sig0, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sig1, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sum0, "LiLi", "nc", "zknh")
-TARGET_BUILTIN(__builtin_riscv_sha256sum1, "LiLi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sig0, "ULiULi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sig1, "ULiULi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sum0, "ULiULi", "nc", "zknh")
+TARGET_BUILTIN(__builtin_riscv_sha256sum1, "ULiULi", "nc", "zknh")
 
-TARGET_BUILTIN(__builtin_riscv_sha512sig0h_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig0l_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig1h_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig1l_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sum0r_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sum1r_32, "ZiZiZi", "nc", "zknh,32bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig0_64, "WiWi", "nc", "zknh,64bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sig1_64, "WiWi", "nc", "zknh,64bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sum0_64, "WiWi", "nc", "zknh,64bit")
-TARGET_BUILTIN(__builtin_riscv_sha512sum1_64, "WiWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig0h_32, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig0l_32, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1h_32, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1l_32, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum0r_32, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum1r_32, "UiUiUi", "nc", "zknh,32bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig0_64, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sig1_64, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum0_64, "UWiUWi", "nc", "zknh,64bit")
+TARGET_BUILTIN(__builtin_riscv_sha512sum1_64, "UWiUWi", "nc", "zknh,64bit")
 
 // Zksed extension
-TARGET_BUILTIN(__builtin_riscv_sm4ed, "LiLiLiIUi", "nc", "zksed") -TARGET_BUILTIN(__builtin_riscv_sm4ks, "LiLiLiIUi", "nc", "zksed") +TARGET_BUILTIN(__builtin_riscv_sm4ed, "ULiULiULiIUi", "nc", "zksed") +TARGET_BUILTIN(__builtin_riscv_sm4ks, "ULiULiULiIUi", "nc", "zksed") // Zksh extension -TARGET_BUILTIN(__builtin_riscv_sm3p0, "LiLi", "nc", "zksh") -TARGET_BUILTIN(__builtin_riscv_sm3p1, "LiLi", "nc", "zksh") +TARGET_BUILTIN(__builtin_riscv_sm3p0, "ULiULi", "nc", "zksh") +TARGET_BUILTIN(__builtin_riscv_sm3p1, "ULiULi", "nc", "zksh") // Zihintntl extension TARGET_BUILTIN(__builtin_riscv_ntl_load, "v.", "t", "experimental-zihintntl") diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c index d255ccb..2e08182 100644 --- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c +++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkb.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple riscv32 -target-feature +zbkb -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV32ZBKB +#include + // RV32ZBKB-LABEL: @brev8( // RV32ZBKB-NEXT: entry: // RV32ZBKB-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4 @@ -10,7 +12,7 @@ // RV32ZBKB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP0]]) // RV32ZBKB-NEXT: ret i32 [[TMP1]] // -int brev8(int rs1) +uint32_t brev8(uint32_t rs1) { return __builtin_riscv_brev8_32(rs1); } @@ -23,7 +25,7 @@ int brev8(int rs1) // RV32ZBKB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.zip.i32(i32 [[TMP0]]) // RV32ZBKB-NEXT: ret i32 [[TMP1]] // -int zip(int rs1) +uint32_t zip(uint32_t rs1) { return __builtin_riscv_zip_32(rs1); } @@ -36,7 +38,7 @@ int zip(int rs1) // RV32ZBKB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.unzip.i32(i32 [[TMP0]]) // RV32ZBKB-NEXT: ret i32 [[TMP1]] // -int unzip(int rs1) +uint32_t unzip(uint32_t rs1) { return __builtin_riscv_unzip_32(rs1); } diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c index dba6b52..06c24d1 100644 --- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c +++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv32-zbkx.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple riscv32 -target-feature +zbkx -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV32ZBKX +#include + // RV32ZBKX-LABEL: @xperm8( // RV32ZBKX-NEXT: entry: // RV32ZBKX-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4 @@ -13,7 +15,7 @@ // RV32ZBKX-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.xperm8.i32(i32 [[TMP0]], i32 [[TMP1]]) // RV32ZBKX-NEXT: ret i32 [[TMP2]] // -int xperm8(int rs1, int rs2) +uint32_t xperm8(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_xperm8_32(rs1, rs2); } @@ -29,7 +31,7 @@ int xperm8(int rs1, int rs2) // RV32ZBKX-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.xperm4.i32(i32 [[TMP0]], i32 [[TMP1]]) // RV32ZBKX-NEXT: ret i32 [[TMP2]] // -int xperm4(int rs1, int rs2) +uint32_t xperm4(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_xperm4_32(rs1, rs2); } diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c index d580d70..d2e3e76 100644 --- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c +++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb-error.c @@ -1,12 +1,14 @@ // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkb -verify %s -o - -int zip(int rs1) +#include + +uint32_t zip(uint32_t rs1) { return 
__builtin_riscv_zip_32(rs1); // expected-error {{builtin requires: 'RV32'}} } -int unzip(int rs1) +uint32_t unzip(uint32_t rs1) { return __builtin_riscv_unzip_32(rs1); // expected-error {{builtin requires: 'RV32'}} } diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c index 70a05d2..f978a6a 100644 --- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c +++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkb.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkb -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV64ZBKB +#include + // RV64ZBKB-LABEL: @brev8_32( // RV64ZBKB-NEXT: entry: // RV64ZBKB-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4 @@ -10,7 +12,7 @@ // RV64ZBKB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP0]]) // RV64ZBKB-NEXT: ret i32 [[TMP1]] // -int brev8_32(int rs1) +uint32_t brev8_32(uint32_t rs1) { return __builtin_riscv_brev8_32(rs1); } @@ -23,7 +25,7 @@ int brev8_32(int rs1) // RV64ZBKB-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.brev8.i64(i64 [[TMP0]]) // RV64ZBKB-NEXT: ret i64 [[TMP1]] // -long brev8_64(long rs1) +uint64_t brev8_64(uint64_t rs1) { return __builtin_riscv_brev8_64(rs1); } diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c index 95db024..43e69aa 100644 --- a/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c +++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/riscv64-zbkx.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkx -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV64ZBKX +#include + // RV64ZBKX-LABEL: @xperm8( // RV64ZBKX-NEXT: entry: // RV64ZBKX-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8 @@ -13,7 +15,7 @@ // RV64ZBKX-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.xperm8.i64(i64 [[TMP0]], i64 [[TMP1]]) // RV64ZBKX-NEXT: ret i64 [[TMP2]] // -long xperm8(long rs1, long rs2) +uint64_t xperm8(uint64_t rs1, uint64_t rs2) { return __builtin_riscv_xperm8_64(rs1, rs2); } @@ -29,7 +31,7 @@ long xperm8(long rs1, long rs2) // RV64ZBKX-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.xperm4.i64(i64 [[TMP0]], i64 [[TMP1]]) // RV64ZBKX-NEXT: ret i64 [[TMP2]] // -long xperm4(long rs1, long rs2) +uint64_t xperm4(uint64_t rs1, uint64_t rs2) { return __builtin_riscv_xperm4_64(rs1, rs2); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c index 70bd4f12..cf24f1b 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknd.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple riscv32 -target-feature +zknd -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV32ZKND +#include + // RV32ZKND-LABEL: @aes32dsi( // RV32ZKND-NEXT: entry: // RV32ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4 @@ -13,7 +15,7 @@ // RV32ZKND-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsi(i32 [[TMP0]], i32 [[TMP1]], i32 3) // RV32ZKND-NEXT: ret i32 [[TMP2]] // -int aes32dsi(int rs1, int rs2) { +uint32_t aes32dsi(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_aes32dsi_32(rs1, rs2, 3); } @@ -28,6 +30,6 @@ int aes32dsi(int rs1, int rs2) { // RV32ZKND-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsmi(i32 [[TMP0]], i32 [[TMP1]], i32 3) // RV32ZKND-NEXT: ret i32 [[TMP2]] // -int aes32dsmi(int rs1, int rs2) { +uint32_t aes32dsmi(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_aes32dsmi_32(rs1, rs2, 3); } diff --git 
a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c index 7528a3b..5b61b9b 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zkne.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple riscv32 -target-feature +zkne -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV32ZKNE +#include + // RV32ZKNE-LABEL: @aes32esi( // RV32ZKNE-NEXT: entry: // RV32ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4 @@ -13,7 +15,7 @@ // RV32ZKNE-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esi(i32 [[TMP0]], i32 [[TMP1]], i32 3) // RV32ZKNE-NEXT: ret i32 [[TMP2]] // -int aes32esi(int rs1, int rs2) { +uint32_t aes32esi(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_aes32esi_32(rs1, rs2, 3); } @@ -28,6 +30,6 @@ int aes32esi(int rs1, int rs2) { // RV32ZKNE-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esmi(i32 [[TMP0]], i32 [[TMP1]], i32 3) // RV32ZKNE-NEXT: ret i32 [[TMP2]] // -int aes32esmi(int rs1, int rs2) { +uint32_t aes32esmi(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_aes32esmi_32(rs1, rs2, 3); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c index 0768510..a54a438 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zknh.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple riscv32 -target-feature +zknh -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV32ZKNH +#include + // RV32ZKNH-LABEL: @sha256sig0( // RV32ZKNH-NEXT: entry: // RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4 @@ -10,7 +12,7 @@ // RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0.i32(i32 [[TMP0]]) // RV32ZKNH-NEXT: ret i32 [[TMP1]] // -long sha256sig0(long rs1) { +unsigned long sha256sig0(unsigned long rs1) { return __builtin_riscv_sha256sig0(rs1); } @@ -22,7 +24,7 @@ long sha256sig0(long rs1) { // RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1.i32(i32 [[TMP0]]) // RV32ZKNH-NEXT: ret i32 [[TMP1]] // -long sha256sig1(long rs1) { +unsigned long sha256sig1(unsigned long rs1) { return __builtin_riscv_sha256sig1(rs1); } @@ -34,7 +36,7 @@ long sha256sig1(long rs1) { // RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0.i32(i32 [[TMP0]]) // RV32ZKNH-NEXT: ret i32 [[TMP1]] // -long sha256sum0(long rs1) { +unsigned long sha256sum0(unsigned long rs1) { return __builtin_riscv_sha256sum0(rs1); } @@ -46,7 +48,7 @@ long sha256sum0(long rs1) { // RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1.i32(i32 [[TMP0]]) // RV32ZKNH-NEXT: ret i32 [[TMP1]] // -long sha256sum1(long rs1) { +unsigned long sha256sum1(unsigned long rs1) { return __builtin_riscv_sha256sum1(rs1); } @@ -61,7 +63,7 @@ long sha256sum1(long rs1) { // RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0h(i32 [[TMP0]], i32 [[TMP1]]) // RV32ZKNH-NEXT: ret i32 [[TMP2]] // -int sha512sig0h(int rs1, int rs2) { +uint32_t sha512sig0h(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_sha512sig0h_32(rs1, rs2); } @@ -76,7 +78,7 @@ int sha512sig0h(int rs1, int rs2) { // RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0l(i32 [[TMP0]], i32 [[TMP1]]) // RV32ZKNH-NEXT: ret i32 [[TMP2]] // -int sha512sig0l(int rs1, int rs2) { +uint32_t sha512sig0l(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_sha512sig0l_32(rs1, rs2); } @@ -91,7 +93,7 @@ int sha512sig0l(int rs1, int rs2) { // RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 
@llvm.riscv.sha512sig1h(i32 [[TMP0]], i32 [[TMP1]]) // RV32ZKNH-NEXT: ret i32 [[TMP2]] // -int sha512sig1h(int rs1, int rs2) { +uint32_t sha512sig1h(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_sha512sig1h_32(rs1, rs2); } @@ -106,7 +108,7 @@ int sha512sig1h(int rs1, int rs2) { // RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1l(i32 [[TMP0]], i32 [[TMP1]]) // RV32ZKNH-NEXT: ret i32 [[TMP2]] // -int sha512sig1l(int rs1, int rs2) { +uint32_t sha512sig1l(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_sha512sig1l_32(rs1, rs2); } @@ -121,7 +123,7 @@ int sha512sig1l(int rs1, int rs2) { // RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum0r(i32 [[TMP0]], i32 [[TMP1]]) // RV32ZKNH-NEXT: ret i32 [[TMP2]] // -int sha512sum0r(int rs1, int rs2) { +uint32_t sha512sum0r(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_sha512sum0r_32(rs1, rs2); } @@ -136,6 +138,6 @@ int sha512sum0r(int rs1, int rs2) { // RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum1r(i32 [[TMP0]], i32 [[TMP1]]) // RV32ZKNH-NEXT: ret i32 [[TMP2]] // -int sha512sum1r(int rs1, int rs2) { +uint32_t sha512sum1r(uint32_t rs1, uint32_t rs2) { return __builtin_riscv_sha512sum1r_32(rs1, rs2); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c index 5cb1ca9..45c52f1 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksed.c @@ -2,6 +2,8 @@ // RUN: %clang_cc1 -triple riscv32 -target-feature +zksed -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV32ZKSED +#include + // RV32ZKSED-LABEL: @sm4ks( // RV32ZKSED-NEXT: entry: // RV32ZKSED-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4 @@ -13,7 +15,7 @@ // RV32ZKSED-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks.i32(i32 [[TMP0]], i32 [[TMP1]], i32 0) // RV32ZKSED-NEXT: ret i32 [[TMP2]] // -long sm4ks(long rs1, long rs2) { +unsigned long sm4ks(unsigned long rs1, unsigned long rs2) { return __builtin_riscv_sm4ks(rs1, rs2, 0); } @@ -29,6 +31,6 @@ long sm4ks(long rs1, long rs2) { // RV32ZKSED-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed.i32(i32 [[TMP0]], i32 [[TMP1]], i32 0) // RV32ZKSED-NEXT: ret i32 [[TMP2]] // -long sm4ed(long rs1, long rs2) { +unsigned long sm4ed(unsigned long rs1, unsigned long rs2) { return __builtin_riscv_sm4ed(rs1, rs2, 0); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c index 31234da..d901f162 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv32-zksh.c @@ -10,7 +10,7 @@ // RV32ZKSH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0.i32(i32 [[TMP0]]) // RV32ZKSH-NEXT: ret i32 [[TMP1]] // -long sm3p0(long rs1) +unsigned long sm3p0(unsigned long rs1) { return __builtin_riscv_sm3p0(rs1); } @@ -23,6 +23,6 @@ long sm3p0(long rs1) // RV32ZKSH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1.i32(i32 [[TMP0]]) // RV32ZKSH-NEXT: ret i32 [[TMP1]] // -long sm3p1(long rs1) { +unsigned long sm3p1(unsigned long rs1) { return __builtin_riscv_sm3p1(rs1); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c index 3596c45..0d2aac2 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd-zkne.c @@ -4,6 +4,8 @@ // RUN: %clang_cc1 -triple riscv64 -target-feature +zkne -emit-llvm %s -o - \ // 
RUN: | FileCheck %s -check-prefix=RV64ZKND-ZKNE +#include + // RV64ZKND-ZKNE-LABEL: @aes64ks1i( // RV64ZKND-ZKNE-NEXT: entry: // RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8 @@ -12,7 +14,7 @@ // RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[TMP0]], i32 0) // RV64ZKND-ZKNE-NEXT: ret i64 [[TMP1]] // -long aes64ks1i(long rs1) { +uint64_t aes64ks1i(uint64_t rs1) { return __builtin_riscv_aes64ks1i_64(rs1, 0); } @@ -27,6 +29,6 @@ long aes64ks1i(long rs1) { // RV64ZKND-ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[TMP0]], i64 [[TMP1]]) // RV64ZKND-ZKNE-NEXT: ret i64 [[TMP2]] // -long aes64ks2(long rs1, long rs2) { +uint64_t aes64ks2(uint64_t rs1, uint64_t rs2) { return __builtin_riscv_aes64ks2_64(rs1, rs2); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c index 3795871..98490ec 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknd.c @@ -2,6 +2,7 @@ // RUN: %clang_cc1 -triple riscv64 -target-feature +zknd -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV64ZKND +#include // RV64ZKND-LABEL: @aes64dsm( // RV64ZKND-NEXT: entry: @@ -14,7 +15,7 @@ // RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[TMP0]], i64 [[TMP1]]) // RV64ZKND-NEXT: ret i64 [[TMP2]] // -long aes64dsm(long rs1, long rs2) { +uint64_t aes64dsm(uint64_t rs1, uint64_t rs2) { return __builtin_riscv_aes64dsm_64(rs1, rs2); } @@ -30,7 +31,7 @@ long aes64dsm(long rs1, long rs2) { // RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[TMP0]], i64 [[TMP1]]) // RV64ZKND-NEXT: ret i64 [[TMP2]] // -long aes64ds(long rs1, long rs2) { +uint64_t aes64ds(uint64_t rs1, uint64_t rs2) { return __builtin_riscv_aes64ds_64(rs1, rs2); } @@ -43,6 +44,6 @@ long aes64ds(long rs1, long rs2) { // RV64ZKND-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[TMP0]]) // RV64ZKND-NEXT: ret i64 [[TMP1]] // -long aes64im(long rs1) { +uint64_t aes64im(uint64_t rs1) { return __builtin_riscv_aes64im_64(rs1); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c index c0cf69b..c605a2a 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zkne.c @@ -2,6 +2,7 @@ // RUN: %clang_cc1 -triple riscv64 -target-feature +zkne -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=RV64ZKNE +#include // RV64ZKNE-LABEL: @aes64es( // RV64ZKNE-NEXT: entry: @@ -14,7 +15,7 @@ // RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[TMP0]], i64 [[TMP1]]) // RV64ZKNE-NEXT: ret i64 [[TMP2]] // -long aes64es(long rs1, long rs2) { +uint64_t aes64es(uint64_t rs1, uint64_t rs2) { return __builtin_riscv_aes64es_64(rs1, rs2); } @@ -30,6 +31,6 @@ long aes64es(long rs1, long rs2) { // RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[TMP0]], i64 [[TMP1]]) // RV64ZKNE-NEXT: ret i64 [[TMP2]] // -long aes64esm(long rs1, long rs2) { +uint64_t aes64esm(uint64_t rs1, uint64_t rs2) { return __builtin_riscv_aes64esm_64(rs1, rs2); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c index 46cbe18..5767153 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c @@ -2,6 +2,7 @@ // RUN: %clang_cc1 -triple riscv64 -target-feature +zknh -emit-llvm %s -o 
- \ // RUN: | FileCheck %s -check-prefix=RV64ZKNH +#include // RV64ZKNH-LABEL: @sha512sig0( // RV64ZKNH-NEXT: entry: @@ -11,7 +12,7 @@ // RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[TMP0]]) // RV64ZKNH-NEXT: ret i64 [[TMP1]] // -long sha512sig0(long rs1) { +uint64_t sha512sig0(uint64_t rs1) { return __builtin_riscv_sha512sig0_64(rs1); } @@ -24,7 +25,7 @@ long sha512sig0(long rs1) { // RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[TMP0]]) // RV64ZKNH-NEXT: ret i64 [[TMP1]] // -long sha512sig1(long rs1) { +uint64_t sha512sig1(uint64_t rs1) { return __builtin_riscv_sha512sig1_64(rs1); } @@ -37,7 +38,7 @@ long sha512sig1(long rs1) { // RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[TMP0]]) // RV64ZKNH-NEXT: ret i64 [[TMP1]] // -long sha512sum0(long rs1) { +uint64_t sha512sum0(uint64_t rs1) { return __builtin_riscv_sha512sum0_64(rs1); } @@ -50,7 +51,7 @@ long sha512sum0(long rs1) { // RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[TMP0]]) // RV64ZKNH-NEXT: ret i64 [[TMP1]] // -long sha512sum1(long rs1) { +uint64_t sha512sum1(uint64_t rs1) { return __builtin_riscv_sha512sum1_64(rs1); } @@ -63,7 +64,7 @@ long sha512sum1(long rs1) { // RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha256sig0.i64(i64 [[TMP0]]) // RV64ZKNH-NEXT: ret i64 [[TMP1]] // -long sha256sig0(long rs1) { +uint64_t sha256sig0(uint64_t rs1) { return __builtin_riscv_sha256sig0(rs1); } @@ -75,7 +76,7 @@ long sha256sig0(long rs1) { // RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha256sig1.i64(i64 [[TMP0]]) // RV64ZKNH-NEXT: ret i64 [[TMP1]] // -long sha256sig1(long rs1) { +uint64_t sha256sig1(uint64_t rs1) { return __builtin_riscv_sha256sig1(rs1); } @@ -88,7 +89,7 @@ long sha256sig1(long rs1) { // RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha256sum0.i64(i64 [[TMP0]]) // RV64ZKNH-NEXT: ret i64 [[TMP1]] // -long sha256sum0(long rs1) { +uint64_t sha256sum0(uint64_t rs1) { return __builtin_riscv_sha256sum0(rs1); } @@ -100,6 +101,6 @@ long sha256sum0(long rs1) { // RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha256sum1.i64(i64 [[TMP0]]) // RV64ZKNH-NEXT: ret i64 [[TMP1]] // -long sha256sum1(long rs1) { +uint64_t sha256sum1(uint64_t rs1) { return __builtin_riscv_sha256sum1(rs1); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c index 89ec909..4c49d11 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksed.c @@ -13,7 +13,7 @@ // RV64ZKSED-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.sm4ks.i64(i64 [[TMP0]], i64 [[TMP1]], i32 0) // RV64ZKSED-NEXT: ret i64 [[TMP2]] // -long sm4ks(long rs1, long rs2) { +unsigned long sm4ks(unsigned long rs1, unsigned long rs2) { return __builtin_riscv_sm4ks(rs1, rs2, 0); } @@ -28,6 +28,6 @@ long sm4ks(long rs1, long rs2) { // RV64ZKSED-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.sm4ed.i64(i64 [[TMP0]], i64 [[TMP1]], i32 0) // RV64ZKSED-NEXT: ret i64 [[TMP2]] // -long sm4ed(long rs1, long rs2) { +unsigned long sm4ed(unsigned long rs1, unsigned long rs2) { return __builtin_riscv_sm4ed(rs1, rs2, 0); } diff --git a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c index fa1d5b0..f90bc78 100644 --- a/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c +++ b/clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zksh.c @@ -10,7 +10,7 @@ // RV64ZKSH-NEXT: [[TMP1:%.*]] = call i64 
@llvm.riscv.sm3p0.i64(i64 [[TMP0]]) // RV64ZKSH-NEXT: ret i64 [[TMP1]] // -long sm3p0(long rs1) { +unsigned long sm3p0(unsigned long rs1) { return __builtin_riscv_sm3p0(rs1); } @@ -23,6 +23,6 @@ long sm3p0(long rs1) { // RV64ZKSH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sm3p1.i64(i64 [[TMP0]]) // RV64ZKSH-NEXT: ret i64 [[TMP1]] // -long sm3p1(long rs1) { +unsigned long sm3p1(unsigned long rs1) { return __builtin_riscv_sm3p1(rs1); } -- 2.7.4
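
Note (illustrative only, not part of the patch): a minimal caller-side sketch of
what the updated prototypes look like from C, matching the new BuiltinsRISCV.def
signatures above ("UiUi", "UiUiUi", "ULiULi"). The wrapper function names and the
-march string are assumptions for the example; only the builtin names and types
come from the patch. Assumes a riscv32 target with Zbkb, Zbkx, and Zknh enabled,
e.g. -march=rv32i_zbkb_zbkx_zknh.

#include <stdint.h>

// __builtin_riscv_brev8_32 is "UiUi": unsigned int operand, unsigned int result.
uint32_t brev8_example(uint32_t rs1) {
  return __builtin_riscv_brev8_32(rs1);
}

// __builtin_riscv_xperm8_32 is "UiUiUi": both operands and the result are unsigned int.
uint32_t xperm8_example(uint32_t rs1, uint32_t rs2) {
  return __builtin_riscv_xperm8_32(rs1, rs2);
}

// __builtin_riscv_sha256sig0 is "ULiULi": unsigned long (XLEN-sized) in and out.
unsigned long sha256sig0_example(unsigned long rs1) {
  return __builtin_riscv_sha256sig0(rs1);
}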