As requested on D155647: add -disable-O0-optnone to the RUN lines and pipe the output through opt -S -passes=mem2reg, then regenerate the assertions with update_cc_test_checks.py so the checks no longer carry the O0 alloca/store/load boilerplate.
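
For reference, a short sketch of why the pipeline change removes the boilerplate (explanatory only, not part of the tests): at -O0 clang attaches the optnone attribute to every function, so a plain opt pipeline would leave the IR untouched; -disable-O0-optnone drops that attribute, and -passes=mem2reg then promotes the argument allocas, which is why the regenerated assertions check the intrinsic call directly on the incoming argument instead of an alloca/store/load sequence. Illustrated below with the clmul_64 test from this patch; the bare CHECK prefix is only for illustration, the per-file prefixes in the diff stay as they are:

    // RUN: %clang_cc1 -triple riscv64 -target-feature +zbkc -emit-llvm %s -o - \
    // RUN:   -disable-O0-optnone | opt -S -passes=mem2reg \
    // RUN:   | FileCheck %s
    #include <stdint.h>

    // With mem2reg applied, the intrinsic consumes the arguments directly:
    // CHECK-LABEL: @clmul_64(
    // CHECK-NEXT:  entry:
    // CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[A:%.*]], i64 [[B:%.*]])
    // CHECK-NEXT:    ret i64 [[TMP0]]
    //
    uint64_t clmul_64(uint64_t a, uint64_t b) {
      return __builtin_riscv_clmul_64(a, b);
    }
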
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +xtheadbb -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32XTHEADBB
// RV32XTHEADBB-LABEL: @clz_32(
// RV32XTHEADBB-NEXT: entry:
-// RV32XTHEADBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32XTHEADBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32XTHEADBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32XTHEADBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
-// RV32XTHEADBB-NEXT: ret i32 [[TMP1]]
+// RV32XTHEADBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[A:%.*]], i1 false)
+// RV32XTHEADBB-NEXT: ret i32 [[TMP0]]
//
unsigned int clz_32(unsigned int a) {
return __builtin_riscv_clz_32(a);
// RV32XTHEADBB-LABEL: @clo_32(
// RV32XTHEADBB-NEXT: entry:
-// RV32XTHEADBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32XTHEADBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32XTHEADBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32XTHEADBB-NEXT: [[NOT:%.*]] = xor i32 [[TMP0]], -1
-// RV32XTHEADBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[NOT]], i1 false)
-// RV32XTHEADBB-NEXT: ret i32 [[TMP1]]
+// RV32XTHEADBB-NEXT: [[NOT:%.*]] = xor i32 [[A:%.*]], -1
+// RV32XTHEADBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[NOT]], i1 false)
+// RV32XTHEADBB-NEXT: ret i32 [[TMP0]]
//
unsigned int clo_32(unsigned int a) {
return __builtin_riscv_clz_32(~a);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zbkb -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32ZBKB
#include <stdint.h>
// RV32ZBKB-LABEL: @brev8(
// RV32ZBKB-NEXT: entry:
-// RV32ZBKB-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKB-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT: ret i32 [[TMP1]]
+// RV32ZBKB-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[RS1:%.*]])
+// RV32ZBKB-NEXT: ret i32 [[TMP0]]
//
uint32_t brev8(uint32_t rs1)
{
// RV32ZBKB-LABEL: @zip(
// RV32ZBKB-NEXT: entry:
-// RV32ZBKB-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKB-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.zip.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT: ret i32 [[TMP1]]
+// RV32ZBKB-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.zip.i32(i32 [[RS1:%.*]])
+// RV32ZBKB-NEXT: ret i32 [[TMP0]]
//
uint32_t zip(uint32_t rs1)
{
// RV32ZBKB-LABEL: @unzip(
// RV32ZBKB-NEXT: entry:
-// RV32ZBKB-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKB-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.unzip.i32(i32 [[TMP0]])
-// RV32ZBKB-NEXT: ret i32 [[TMP1]]
+// RV32ZBKB-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.unzip.i32(i32 [[RS1:%.*]])
+// RV32ZBKB-NEXT: ret i32 [[TMP0]]
//
uint32_t unzip(uint32_t rs1)
{
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zbkc -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32ZBKC
#include <stdint.h>
// RV32ZBKC-LABEL: @clmul_32(
// RV32ZBKC-NEXT: entry:
-// RV32ZBKC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKC-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBKC-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBKC-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKC-NEXT: ret i32 [[TMP2]]
+// RV32ZBKC-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBKC-NEXT: ret i32 [[TMP0]]
//
uint32_t clmul_32(uint32_t a, uint32_t b) {
return __builtin_riscv_clmul_32(a, b);
// RV32ZBKC-LABEL: @clmulh_32(
// RV32ZBKC-NEXT: entry:
-// RV32ZBKC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKC-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBKC-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBKC-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBKC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKC-NEXT: ret i32 [[TMP2]]
+// RV32ZBKC-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBKC-NEXT: ret i32 [[TMP0]]
//
uint32_t clmulh_32(uint32_t a, uint32_t b) {
return __builtin_riscv_clmulh_32(a, b);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zbkx -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32ZBKX
#include <stdint.h>
// RV32ZBKX-LABEL: @xperm8(
// RV32ZBKX-NEXT: entry:
-// RV32ZBKX-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKX-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKX-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKX-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKX-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.xperm8.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKX-NEXT: ret i32 [[TMP2]]
+// RV32ZBKX-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.xperm8.i32(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZBKX-NEXT: ret i32 [[TMP0]]
//
uint32_t xperm8(uint32_t rs1, uint32_t rs2)
{
// RV32ZBKX-LABEL: @xperm4(
// RV32ZBKX-NEXT: entry:
-// RV32ZBKX-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKX-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBKX-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZBKX-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZBKX-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZBKX-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.xperm4.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBKX-NEXT: ret i32 [[TMP2]]
+// RV32ZBKX-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.xperm4.i32(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZBKX-NEXT: ret i32 [[TMP0]]
//
uint32_t xperm4(uint32_t rs1, uint32_t rs2)
{
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +xtheadbb -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64XTHEADBB
// RV64XTHEADBB-LABEL: @clz_32(
// RV64XTHEADBB-NEXT: entry:
-// RV64XTHEADBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// RV64XTHEADBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV64XTHEADBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV64XTHEADBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[TMP0]], i1 false)
-// RV64XTHEADBB-NEXT: ret i32 [[TMP1]]
+// RV64XTHEADBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[A:%.*]], i1 false)
+// RV64XTHEADBB-NEXT: ret i32 [[TMP0]]
//
unsigned int clz_32(unsigned int a) {
return __builtin_riscv_clz_32(a);
// RV64XTHEADBB-LABEL: @clo_32(
// RV64XTHEADBB-NEXT: entry:
-// RV64XTHEADBB-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// RV64XTHEADBB-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV64XTHEADBB-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV64XTHEADBB-NEXT: [[NOT:%.*]] = xor i32 [[TMP0]], -1
-// RV64XTHEADBB-NEXT: [[TMP1:%.*]] = call i32 @llvm.ctlz.i32(i32 [[NOT]], i1 false)
-// RV64XTHEADBB-NEXT: ret i32 [[TMP1]]
+// RV64XTHEADBB-NEXT: [[NOT:%.*]] = xor i32 [[A:%.*]], -1
+// RV64XTHEADBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.ctlz.i32(i32 [[NOT]], i1 false)
+// RV64XTHEADBB-NEXT: ret i32 [[TMP0]]
//
unsigned int clo_32(unsigned int a) {
return __builtin_riscv_clz_32(~a);
// RV64XTHEADBB-LABEL: @clz_64(
// RV64XTHEADBB-NEXT: entry:
-// RV64XTHEADBB-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64XTHEADBB-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64XTHEADBB-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64XTHEADBB-NEXT: [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[TMP0]], i1 false)
-// RV64XTHEADBB-NEXT: [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// RV64XTHEADBB-NEXT: [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[A:%.*]], i1 false)
+// RV64XTHEADBB-NEXT: [[CAST:%.*]] = trunc i64 [[TMP0]] to i32
// RV64XTHEADBB-NEXT: ret i32 [[CAST]]
//
unsigned int clz_64(unsigned long a) {
// RV64XTHEADBB-LABEL: @clo_64(
// RV64XTHEADBB-NEXT: entry:
-// RV64XTHEADBB-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64XTHEADBB-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64XTHEADBB-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64XTHEADBB-NEXT: [[NOT:%.*]] = xor i64 [[TMP0]], -1
-// RV64XTHEADBB-NEXT: [[TMP1:%.*]] = call i64 @llvm.ctlz.i64(i64 [[NOT]], i1 false)
-// RV64XTHEADBB-NEXT: [[CAST:%.*]] = trunc i64 [[TMP1]] to i32
+// RV64XTHEADBB-NEXT: [[NOT:%.*]] = xor i64 [[A:%.*]], -1
+// RV64XTHEADBB-NEXT: [[TMP0:%.*]] = call i64 @llvm.ctlz.i64(i64 [[NOT]], i1 false)
+// RV64XTHEADBB-NEXT: [[CAST:%.*]] = trunc i64 [[TMP0]] to i32
// RV64XTHEADBB-NEXT: ret i32 [[CAST]]
//
unsigned int clo_64(unsigned long a) {
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +zbkb -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZBKB
#include <stdint.h>
// RV64ZBKB-LABEL: @brev8_32(
// RV64ZBKB-NEXT: entry:
-// RV64ZBKB-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBKB-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZBKB-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZBKB-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[TMP0]])
-// RV64ZBKB-NEXT: ret i32 [[TMP1]]
+// RV64ZBKB-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.brev8.i32(i32 [[RS1:%.*]])
+// RV64ZBKB-NEXT: ret i32 [[TMP0]]
//
uint32_t brev8_32(uint32_t rs1)
{
// RV64ZBKB-LABEL: @brev8_64(
// RV64ZBKB-NEXT: entry:
-// RV64ZBKB-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKB-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZBKB-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZBKB-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.brev8.i64(i64 [[TMP0]])
-// RV64ZBKB-NEXT: ret i64 [[TMP1]]
+// RV64ZBKB-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.brev8.i64(i64 [[RS1:%.*]])
+// RV64ZBKB-NEXT: ret i64 [[TMP0]]
//
uint64_t brev8_64(uint64_t rs1)
{
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +zbkc -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZBKC
#include <stdint.h>
// RV64ZBKC-LABEL: @clmul_64(
// RV64ZBKC-NEXT: entry:
-// RV64ZBKC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKC-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBKC-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBKC-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKC-NEXT: ret i64 [[TMP2]]
+// RV64ZBKC-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBKC-NEXT: ret i64 [[TMP0]]
//
uint64_t clmul_64(uint64_t a, uint64_t b) {
return __builtin_riscv_clmul_64(a, b);
// RV64ZBKC-LABEL: @clmulh_64(
// RV64ZBKC-NEXT: entry:
-// RV64ZBKC-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKC-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKC-NEXT: store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBKC-NEXT: store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT: [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBKC-NEXT: [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBKC-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKC-NEXT: ret i64 [[TMP2]]
+// RV64ZBKC-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBKC-NEXT: ret i64 [[TMP0]]
//
uint64_t clmulh_64(uint64_t a, uint64_t b) {
return __builtin_riscv_clmulh_64(a, b);
// RV64ZBKC-LABEL: @clmul_32(
// RV64ZBKC-NEXT: entry:
-// RV64ZBKC-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBKC-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBKC-NEXT: store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV64ZBKC-NEXT: store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV64ZBKC-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV64ZBKC-NEXT: [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV64ZBKC-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBKC-NEXT: ret i32 [[TMP2]]
+// RV64ZBKC-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV64ZBKC-NEXT: ret i32 [[TMP0]]
//
uint32_t clmul_32(uint32_t a, uint32_t b) {
return __builtin_riscv_clmul_32(a, b);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +zbkx -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZBKX
#include <stdint.h>
// RV64ZBKX-LABEL: @xperm8(
// RV64ZBKX-NEXT: entry:
-// RV64ZBKX-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKX-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKX-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZBKX-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZBKX-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.xperm8.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKX-NEXT: ret i64 [[TMP2]]
+// RV64ZBKX-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.xperm8.i64(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZBKX-NEXT: ret i64 [[TMP0]]
//
uint64_t xperm8(uint64_t rs1, uint64_t rs2)
{
// RV64ZBKX-LABEL: @xperm4(
// RV64ZBKX-NEXT: entry:
-// RV64ZBKX-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKX-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBKX-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZBKX-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZBKX-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZBKX-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.xperm4.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBKX-NEXT: ret i64 [[TMP2]]
+// RV64ZBKX-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.xperm4.i64(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZBKX-NEXT: ret i64 [[TMP0]]
//
uint64_t xperm4(uint64_t rs1, uint64_t rs2)
{
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zknd -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32ZKND
#include <stdint.h>
// RV32ZKND-LABEL: @aes32dsi(
// RV32ZKND-NEXT: entry:
-// RV32ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKND-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKND-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKND-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
-// RV32ZKND-NEXT: ret i32 [[TMP2]]
+// RV32ZKND-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.aes32dsi(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 3)
+// RV32ZKND-NEXT: ret i32 [[TMP0]]
//
uint32_t aes32dsi(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_aes32dsi(rs1, rs2, 3);
// RV32ZKND-LABEL: @aes32dsmi(
// RV32ZKND-NEXT: entry:
-// RV32ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKND-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKND-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKND-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKND-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.aes32dsmi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
-// RV32ZKND-NEXT: ret i32 [[TMP2]]
+// RV32ZKND-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.aes32dsmi(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 3)
+// RV32ZKND-NEXT: ret i32 [[TMP0]]
//
uint32_t aes32dsmi(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_aes32dsmi(rs1, rs2, 3);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zkne -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32ZKNE
#include <stdint.h>
// RV32ZKNE-LABEL: @aes32esi(
// RV32ZKNE-NEXT: entry:
-// RV32ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNE-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNE-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
-// RV32ZKNE-NEXT: ret i32 [[TMP2]]
+// RV32ZKNE-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.aes32esi(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 3)
+// RV32ZKNE-NEXT: ret i32 [[TMP0]]
//
uint32_t aes32esi(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_aes32esi(rs1, rs2, 3);
// RV32ZKNE-LABEL: @aes32esmi(
// RV32ZKNE-NEXT: entry:
-// RV32ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNE-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNE-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNE-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNE-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.aes32esmi(i32 [[TMP0]], i32 [[TMP1]], i32 3)
-// RV32ZKNE-NEXT: ret i32 [[TMP2]]
+// RV32ZKNE-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.aes32esmi(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 3)
+// RV32ZKNE-NEXT: ret i32 [[TMP0]]
//
uint32_t aes32esmi(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_aes32esmi(rs1, rs2, 3);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zknh -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32ZKNH
#include <stdint.h>
// RV32ZKNH-LABEL: @sha256sig0(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP0]])
-// RV32ZKNH-NEXT: ret i32 [[TMP1]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[RS1:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha256sig0(uint32_t rs1) {
return __builtin_riscv_sha256sig0(rs1);
// RV32ZKNH-LABEL: @sha256sig1(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP0]])
-// RV32ZKNH-NEXT: ret i32 [[TMP1]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[RS1:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha256sig1(uint32_t rs1) {
return __builtin_riscv_sha256sig1(rs1);
// RV32ZKNH-LABEL: @sha256sum0(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP0]])
-// RV32ZKNH-NEXT: ret i32 [[TMP1]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[RS1:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha256sum0(uint32_t rs1) {
return __builtin_riscv_sha256sum0(rs1);
// RV32ZKNH-LABEL: @sha256sum1(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP0]])
-// RV32ZKNH-NEXT: ret i32 [[TMP1]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[RS1:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha256sum1(uint32_t rs1) {
return __builtin_riscv_sha256sum1(rs1);
// RV32ZKNH-LABEL: @sha512sig0h(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0h(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT: ret i32 [[TMP2]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sig0h(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha512sig0h(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_sha512sig0h(rs1, rs2);
// RV32ZKNH-LABEL: @sha512sig0l(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig0l(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT: ret i32 [[TMP2]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sig0l(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha512sig0l(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_sha512sig0l(rs1, rs2);
// RV32ZKNH-LABEL: @sha512sig1h(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1h(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT: ret i32 [[TMP2]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sig1h(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha512sig1h(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_sha512sig1h(rs1, rs2);
// RV32ZKNH-LABEL: @sha512sig1l(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sig1l(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT: ret i32 [[TMP2]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sig1l(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha512sig1l(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_sha512sig1l(rs1, rs2);
// RV32ZKNH-LABEL: @sha512sum0r(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum0r(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT: ret i32 [[TMP2]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sum0r(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha512sum0r(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_sha512sum0r(rs1, rs2);
// RV32ZKNH-LABEL: @sha512sum1r(
// RV32ZKNH-NEXT: entry:
-// RV32ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKNH-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sha512sum1r(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZKNH-NEXT: ret i32 [[TMP2]]
+// RV32ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha512sum1r(i32 [[RS1:%.*]], i32 [[RS2:%.*]])
+// RV32ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha512sum1r(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_sha512sum1r(rs1, rs2);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +zknd -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZKND-ZKNE
// RUN: %clang_cc1 -triple riscv64 -target-feature +zkne -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZKND-ZKNE
#include <stdint.h>
// RV64ZKND-ZKNE-LABEL: @aes64ks1i(
// RV64ZKND-ZKNE-NEXT: entry:
-// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[TMP0]], i32 0)
-// RV64ZKND-ZKNE-NEXT: ret i64 [[TMP1]]
+// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.aes64ks1i(i64 [[RS1:%.*]], i32 0)
+// RV64ZKND-ZKNE-NEXT: ret i64 [[TMP0]]
//
uint64_t aes64ks1i(uint64_t rs1) {
return __builtin_riscv_aes64ks1i(rs1, 0);
// RV64ZKND-ZKNE-LABEL: @aes64ks2(
// RV64ZKND-ZKNE-NEXT: entry:
-// RV64ZKND-ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-ZKNE-NEXT: ret i64 [[TMP2]]
+// RV64ZKND-ZKNE-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.aes64ks2(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKND-ZKNE-NEXT: ret i64 [[TMP0]]
//
uint64_t aes64ks2(uint64_t rs1, uint64_t rs2) {
return __builtin_riscv_aes64ks2(rs1, rs2);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +zknd -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZKND
#include <stdint.h>
// RV64ZKND-LABEL: @aes64dsm(
// RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-NEXT: ret i64 [[TMP2]]
+// RV64ZKND-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.aes64dsm(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKND-NEXT: ret i64 [[TMP0]]
//
uint64_t aes64dsm(uint64_t rs1, uint64_t rs2) {
return __builtin_riscv_aes64dsm(rs1, rs2);
// RV64ZKND-LABEL: @aes64ds(
// RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKND-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKND-NEXT: ret i64 [[TMP2]]
+// RV64ZKND-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.aes64ds(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKND-NEXT: ret i64 [[TMP0]]
//
uint64_t aes64ds(uint64_t rs1, uint64_t rs2) {
return __builtin_riscv_aes64ds(rs1, rs2);
// RV64ZKND-LABEL: @aes64im(
// RV64ZKND-NEXT: entry:
-// RV64ZKND-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKND-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKND-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[TMP0]])
-// RV64ZKND-NEXT: ret i64 [[TMP1]]
+// RV64ZKND-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.aes64im(i64 [[RS1:%.*]])
+// RV64ZKND-NEXT: ret i64 [[TMP0]]
//
uint64_t aes64im(uint64_t rs1) {
return __builtin_riscv_aes64im(rs1);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +zkne -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZKNE
#include <stdint.h>
// RV64ZKNE-LABEL: @aes64es(
// RV64ZKNE-NEXT: entry:
-// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKNE-NEXT: ret i64 [[TMP2]]
+// RV64ZKNE-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.aes64es(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKNE-NEXT: ret i64 [[TMP0]]
//
uint64_t aes64es(uint64_t rs1, uint64_t rs2) {
return __builtin_riscv_aes64es(rs1, rs2);
// RV64ZKNE-LABEL: @aes64esm(
// RV64ZKNE-NEXT: entry:
-// RV64ZKNE-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNE-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNE-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNE-NEXT: store i64 [[RS2:%.*]], ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNE-NEXT: [[TMP1:%.*]] = load i64, ptr [[RS2_ADDR]], align 8
-// RV64ZKNE-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZKNE-NEXT: ret i64 [[TMP2]]
+// RV64ZKNE-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.aes64esm(i64 [[RS1:%.*]], i64 [[RS2:%.*]])
+// RV64ZKNE-NEXT: ret i64 [[TMP0]]
//
uint64_t aes64esm(uint64_t rs1, uint64_t rs2) {
return __builtin_riscv_aes64esm(rs1, rs2);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +zknh -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZKNH
#include <stdint.h>
// RV64ZKNH-LABEL: @sha512sig0(
// RV64ZKNH-NEXT: entry:
-// RV64ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[TMP0]])
-// RV64ZKNH-NEXT: ret i64 [[TMP1]]
+// RV64ZKNH-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[RS1:%.*]])
+// RV64ZKNH-NEXT: ret i64 [[TMP0]]
//
uint64_t sha512sig0(uint64_t rs1) {
return __builtin_riscv_sha512sig0(rs1);
// RV64ZKNH-LABEL: @sha512sig1(
// RV64ZKNH-NEXT: entry:
-// RV64ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[TMP0]])
-// RV64ZKNH-NEXT: ret i64 [[TMP1]]
+// RV64ZKNH-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[RS1:%.*]])
+// RV64ZKNH-NEXT: ret i64 [[TMP0]]
//
uint64_t sha512sig1(uint64_t rs1) {
return __builtin_riscv_sha512sig1(rs1);
// RV64ZKNH-LABEL: @sha512sum0(
// RV64ZKNH-NEXT: entry:
-// RV64ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[TMP0]])
-// RV64ZKNH-NEXT: ret i64 [[TMP1]]
+// RV64ZKNH-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[RS1:%.*]])
+// RV64ZKNH-NEXT: ret i64 [[TMP0]]
//
uint64_t sha512sum0(uint64_t rs1) {
return __builtin_riscv_sha512sum0(rs1);
// RV64ZKNH-LABEL: @sha512sum1(
// RV64ZKNH-NEXT: entry:
-// RV64ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
-// RV64ZKNH-NEXT: store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT: [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
-// RV64ZKNH-NEXT: [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[TMP0]])
-// RV64ZKNH-NEXT: ret i64 [[TMP1]]
+// RV64ZKNH-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[RS1:%.*]])
+// RV64ZKNH-NEXT: ret i64 [[TMP0]]
//
uint64_t sha512sum1(uint64_t rs1) {
return __builtin_riscv_sha512sum1(rs1);
// RV64ZKNH-LABEL: @sha256sig0(
// RV64ZKNH-NEXT: entry:
-// RV64ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[TMP0]])
-// RV64ZKNH-NEXT: ret i32 [[TMP1]]
+// RV64ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sig0(i32 [[RS1:%.*]])
+// RV64ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha256sig0(uint32_t rs1) {
return __builtin_riscv_sha256sig0(rs1);
// RV64ZKNH-LABEL: @sha256sig1(
// RV64ZKNH-NEXT: entry:
-// RV64ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[TMP0]])
-// RV64ZKNH-NEXT: ret i32 [[TMP1]]
+// RV64ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sig1(i32 [[RS1:%.*]])
+// RV64ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha256sig1(uint32_t rs1) {
return __builtin_riscv_sha256sig1(rs1);
// RV64ZKNH-LABEL: @sha256sum0(
// RV64ZKNH-NEXT: entry:
-// RV64ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[TMP0]])
-// RV64ZKNH-NEXT: ret i32 [[TMP1]]
+// RV64ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sum0(i32 [[RS1:%.*]])
+// RV64ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha256sum0(uint32_t rs1) {
return __builtin_riscv_sha256sum0(rs1);
// RV64ZKNH-LABEL: @sha256sum1(
// RV64ZKNH-NEXT: entry:
-// RV64ZKNH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[TMP0]])
-// RV64ZKNH-NEXT: ret i32 [[TMP1]]
+// RV64ZKNH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sha256sum1(i32 [[RS1:%.*]])
+// RV64ZKNH-NEXT: ret i32 [[TMP0]]
//
uint32_t sha256sum1(uint32_t rs1) {
return __builtin_riscv_sha256sum1(rs1);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zksed -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32ZKSED
// RUN: %clang_cc1 -triple riscv64 -target-feature +zksed -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZKSED
#include <stdint.h>
// RV32ZKSED-LABEL: @sm4ks(
// RV32ZKSED-NEXT: entry:
-// RV32ZKSED-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSED-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSED-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKSED-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSED-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[TMP0]], i32 [[TMP1]], i32 0)
-// RV32ZKSED-NEXT: ret i32 [[TMP2]]
+// RV32ZKSED-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 0)
+// RV32ZKSED-NEXT: ret i32 [[TMP0]]
//
// RV64ZKSED-LABEL: @sm4ks(
// RV64ZKSED-NEXT: entry:
-// RV64ZKSED-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSED-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSED-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKSED-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKSED-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSED-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKSED-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[TMP0]], i32 [[TMP1]], i32 0)
-// RV64ZKSED-NEXT: ret i32 [[TMP2]]
+// RV64ZKSED-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sm4ks(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 0)
+// RV64ZKSED-NEXT: ret i32 [[TMP0]]
//
uint32_t sm4ks(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_sm4ks(rs1, rs2, 0);
// RV32ZKSED-LABEL: @sm4ed(
// RV32ZKSED-NEXT: entry:
-// RV32ZKSED-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSED-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSED-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKSED-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSED-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV32ZKSED-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[TMP0]], i32 [[TMP1]], i32 0)
-// RV32ZKSED-NEXT: ret i32 [[TMP2]]
+// RV32ZKSED-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 0)
+// RV32ZKSED-NEXT: ret i32 [[TMP0]]
//
// RV64ZKSED-LABEL: @sm4ed(
// RV64ZKSED-NEXT: entry:
-// RV64ZKSED-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSED-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSED-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKSED-NEXT: store i32 [[RS2:%.*]], ptr [[RS2_ADDR]], align 4
-// RV64ZKSED-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSED-NEXT: [[TMP1:%.*]] = load i32, ptr [[RS2_ADDR]], align 4
-// RV64ZKSED-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[TMP0]], i32 [[TMP1]], i32 0)
-// RV64ZKSED-NEXT: ret i32 [[TMP2]]
+// RV64ZKSED-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sm4ed(i32 [[RS1:%.*]], i32 [[RS2:%.*]], i32 0)
+// RV64ZKSED-NEXT: ret i32 [[TMP0]]
//
uint32_t sm4ed(uint32_t rs1, uint32_t rs2) {
return __builtin_riscv_sm4ed(rs1, rs2, 0);
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +zksh -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV32ZKSH
// RUN: %clang_cc1 -triple riscv64 -target-feature +zksh -emit-llvm %s -o - \
+// RUN: -disable-O0-optnone | opt -S -passes=mem2reg \
// RUN: | FileCheck %s -check-prefix=RV64ZKSH
#include <stdint.h>
// RV32ZKSH-LABEL: @sm3p0(
// RV32ZKSH-NEXT: entry:
-// RV32ZKSH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP0]])
-// RV32ZKSH-NEXT: ret i32 [[TMP1]]
+// RV32ZKSH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[RS1:%.*]])
+// RV32ZKSH-NEXT: ret i32 [[TMP0]]
//
// RV64ZKSH-LABEL: @sm3p0(
// RV64ZKSH-NEXT: entry:
-// RV64ZKSH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[TMP0]])
-// RV64ZKSH-NEXT: ret i32 [[TMP1]]
+// RV64ZKSH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sm3p0(i32 [[RS1:%.*]])
+// RV64ZKSH-NEXT: ret i32 [[TMP0]]
//
uint32_t sm3p0(uint32_t rs1) {
return __builtin_riscv_sm3p0(rs1);
// RV32ZKSH-LABEL: @sm3p1(
// RV32ZKSH-NEXT: entry:
-// RV32ZKSH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV32ZKSH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV32ZKSH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP0]])
-// RV32ZKSH-NEXT: ret i32 [[TMP1]]
+// RV32ZKSH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[RS1:%.*]])
+// RV32ZKSH-NEXT: ret i32 [[TMP0]]
//
// RV64ZKSH-LABEL: @sm3p1(
// RV64ZKSH-NEXT: entry:
-// RV64ZKSH-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKSH-NEXT: store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT: [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKSH-NEXT: [[TMP1:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[TMP0]])
-// RV64ZKSH-NEXT: ret i32 [[TMP1]]
+// RV64ZKSH-NEXT: [[TMP0:%.*]] = call i32 @llvm.riscv.sm3p1(i32 [[RS1:%.*]])
+// RV64ZKSH-NEXT: ret i32 [[TMP0]]
//
uint32_t sm3p1(uint32_t rs1) {
return __builtin_riscv_sm3p1(rs1);