From 8c971d59a7c16247954a90335b50bb5b2cc88d34 Mon Sep 17 00:00:00 2001
From: Ju-Zhe Zhong
Date: Fri, 3 Feb 2023 15:53:15 +0800
Subject: [PATCH] RISC-V: Add vmul.vx C++ API testcase

gcc/testsuite/ChangeLog:

	* g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_rv32-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_rv32-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_rv32-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_rv64-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_rv64-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_rv64-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C: New test.
	* g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C: New test.
--- .../g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C | 292 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C | 292 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C | 292 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_rv32-1.C | 572 ++++++++++++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_rv32-2.C | 572 ++++++++++++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_rv32-3.C | 572 ++++++++++++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_rv64-1.C | 578 +++++++++++++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_rv64-2.C | 578 +++++++++++++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_rv64-3.C | 578 +++++++++++++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C | 292 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C | 292 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C | 292 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C | 289 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C | 292 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C | 292 +++++++++++ .../g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C | 292 +++++++++++ .../riscv/rvv/base/vmul_vx_tumu_rv32-1.C | 289 +++++++++++ .../riscv/rvv/base/vmul_vx_tumu_rv32-2.C | 289 +++++++++++ .../riscv/rvv/base/vmul_vx_tumu_rv32-3.C | 289 +++++++++++ .../riscv/rvv/base/vmul_vx_tumu_rv64-1.C | 292 +++++++++++ .../riscv/rvv/base/vmul_vx_tumu_rv64-2.C | 292 +++++++++++ .../riscv/rvv/base/vmul_vx_tumu_rv64-3.C | 292 +++++++++++ 30 files changed, 10422 insertions(+) create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-3.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-3.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C create mode 100644 
gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C create mode 100644 gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C new file mode 100644 index 0000000..323f55e --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-1.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t 
op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul_mu(vbool64_t 
mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { 
dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C new file mode 100644 index 0000000..6f264d9 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-2.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return 
__riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t 
merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C new file mode 100644 index 0000000..9e3a93c --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv32-3.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32m1_t 
test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return 
__riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C new file mode 100644 index 0000000..77e5224 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-1.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + 
+vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return 
__riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C new file mode 100644 index 0000000..4104fe9 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-2.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + 
return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t 
op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C new file mode 100644 index 0000000..ad69943 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_mu_rv64-3.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul_mu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul_mu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul_mu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul_mu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16mf2_t 
test___riscv_vmul_mu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul_mu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul_mu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul_mu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul_mu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul_mu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul_mu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul_mu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul_mu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul_mu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul_mu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul_mu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul_mu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul_mu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul_mu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul_mu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul_mu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul_mu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul_mu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul_mu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul_mu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return 
__riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul_mu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul_mu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul_mu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul_mu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul_mu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul_mu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul_mu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul_mu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul_mu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul_mu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul_mu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul_mu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul_mu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul_mu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_mu(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-1.C new file mode 100644 index 0000000..c68218d --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-1.C @@ -0,0 +1,572 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl) +{ + return 
__riscv_vmul(op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return 
__riscv_vmul(op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul(vbool8_t 
mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { 
scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } 
} */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-2.C new file mode 100644 index 0000000..f0403fb --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-2.C @@ -0,0 +1,572 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl) +{ + return 
__riscv_vmul(op1,op2,31); +} + + +vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + 
return __riscv_vmul(op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vmul(mask,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-3.C new file mode 100644 index 0000000..e162564 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv32-3.C @@ -0,0 +1,572 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + 
+vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t 
op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul(vbool64_t 
mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } 
*/ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-1.C new file mode 100644 index 0000000..badbd3f --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-1.C @@ -0,0 +1,578 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + 
+vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,vl); +} + + +vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + 
+vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) 
+{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-2.C new file mode 100644 index 0000000..6c36d54 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-2.C @@ -0,0 +1,578 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl) +{ 
+ return __riscv_vmul(op1,op2,31); +} + + +vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,31); +} + + +vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t 
op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8m4_t 
test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-3.C new file mode 100644 index 0000000..c3f098a --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_rv64-3.C @@ -0,0 +1,578 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul(vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul(vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul(vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8m1_t test___riscv_vmul(vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8m2_t test___riscv_vmul(vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8m4_t test___riscv_vmul(vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8m8_t test___riscv_vmul(vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul(vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul(vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16m1_t test___riscv_vmul(vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + 
+ +vint16m2_t test___riscv_vmul(vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16m4_t test___riscv_vmul(vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint16m8_t test___riscv_vmul(vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul(vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32m1_t test___riscv_vmul(vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32m2_t test___riscv_vmul(vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32m4_t test___riscv_vmul(vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint32m8_t test___riscv_vmul(vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint64m1_t test___riscv_vmul(vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint64m2_t test___riscv_vmul(vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint64m4_t test___riscv_vmul(vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint64m8_t test___riscv_vmul(vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul(vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul(vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul(vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul(vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul(vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul(vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul(vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul(vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul(vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul(vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul(vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul(vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul(vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul(vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul(vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul(vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul(vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul(vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul(vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + 
+vuint64m2_t test___riscv_vmul(vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul(vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul(vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(op1,op2,32); +} + + +vint8mf8_t test___riscv_vmul(vbool64_t mask,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul(vbool32_t mask,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul(vbool16_t mask,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul(vbool8_t mask,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul(vbool4_t mask,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul(vbool2_t mask,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul(vbool1_t mask,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul(vbool64_t mask,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul(vbool32_t mask,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul(vbool16_t mask,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul(vbool8_t mask,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul(vbool4_t mask,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul(vbool2_t mask,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul(vbool64_t mask,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul(vbool32_t mask,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul(vbool16_t mask,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul(vbool8_t mask,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul(vbool4_t mask,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul(vbool64_t mask,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul(vbool32_t mask,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul(vbool16_t mask,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul(vbool8_t mask,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul(vbool64_t mask,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul(vbool32_t mask,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8mf2_t 
test___riscv_vmul(vbool16_t mask,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul(vbool8_t mask,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul(vbool4_t mask,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul(vbool2_t mask,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul(vbool1_t mask,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul(vbool64_t mask,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul(vbool32_t mask,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul(vbool16_t mask,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul(vbool8_t mask,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul(vbool4_t mask,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul(vbool2_t mask,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul(vbool64_t mask,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul(vbool32_t mask,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul(vbool16_t mask,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul(vbool8_t mask,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul(vbool4_t mask,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul(vbool64_t mask,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul(vbool32_t mask,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul(vbool16_t mask,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul(vbool8_t mask,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul(mask,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*t[au],\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C new file mode 100644 index 0000000..dbf865f --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-1.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + 
return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + 
return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C new file mode 100644 index 0000000..51152ca --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-2.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return 
__riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t 
op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C new file mode 100644 index 0000000..e2abfdd --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv32-3.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t 
op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + 
+vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C new file mode 100644 index 0000000..de74cec --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-1.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include 
"riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C new file mode 100644 index 0000000..fa68727 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-2.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return 
__riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return 
__riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul_tu(vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } 
*/ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C new file mode 100644 index 0000000..0865b9b --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tu_rv64-3.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tu(vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul_tu(vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul_tu(vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul_tu(vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul_tu(vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul_tu(vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul_tu(vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul_tu(vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t 
vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul_tu(vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul_tu(vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul_tu(vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul_tu(vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul_tu(vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul_tu(vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul_tu(vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul_tu(vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul_tu(vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul_tu(vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul_tu(vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul_tu(vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul_tu(vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul_tu(vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul_tu(vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul_tu(vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul_tu(vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul_tu(vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul_tu(vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul_tu(vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul_tu(vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul_tu(vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul_tu(vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul_tu(vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul_tu(vuint16m2_t 
merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul_tu(vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul_tu(vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul_tu(vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul_tu(vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul_tu(vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul_tu(vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul_tu(vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul_tu(vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul_tu(vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul_tu(vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul_tu(vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tu(merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { 
dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C new file mode 100644 index 0000000..d9292b7 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-1.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return 
__riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul_tum(vbool16_t 
mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C new file mode 100644 index 0000000..691d086 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-2.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return 
__riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul_tum(vbool32_t 
mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { 
dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C new file mode 100644 index 0000000..113f1f7 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv32-3.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16m1_t 
test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ 
+ return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { 
scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C new file mode 100644 index 0000000..3d7b687 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-1.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return 
__riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul_tum(vbool32_t 
mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C new file mode 100644 index 0000000..853580b --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-2.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t 
vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul_tum(vbool4_t 
mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C new file mode 100644 index 0000000..5fed85d --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tum_rv64-3.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul_tum(vbool32_t 
mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul_tum(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul_tum(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul_tum(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul_tum(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul_tum(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul_tum(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul_tum(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul_tum(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul_tum(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul_tum(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul_tum(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul_tum(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul_tum(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul_tum(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul_tum(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul_tum(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul_tum(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + 
+vuint8mf4_t test___riscv_vmul_tum(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul_tum(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul_tum(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul_tum(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul_tum(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul_tum(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul_tum(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul_tum(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul_tum(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul_tum(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul_tum(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul_tum(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul_tum(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul_tum(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul_tum(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul_tum(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul_tum(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul_tum(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul_tum(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul_tum(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul_tum(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tum(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*m[au]\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git 
a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C new file mode 100644 index 0000000..74ec5b7 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-1.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); 
+} + + +vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + 
+vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { 
scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C new file mode 100644 index 0000000..a6857f1 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-2.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t 
merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t 
op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C new file mode 100644 index 0000000..a07bd57 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv32-3.C @@ -0,0 +1,289 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv32gcv -mabi=ilp32d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t 
op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t 
op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vmul\.vv\s+v[0-9]+,\s*v[0-9]+,\s*v[0-9]+,\s*v0.t\s+} 8 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C new file mode 100644 index 0000000..46901e0 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-1.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t 
op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + 
return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + +vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,vl); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C new file mode 100644 index 0000000..68c7682 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-2.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t 
merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t 
op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + +vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,31); +} + + + +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetivli\s+zero,\s*31,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetivli\s+zero,\s*31,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ diff --git a/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C new file mode 100644 index 0000000..75143a6 --- /dev/null +++ b/gcc/testsuite/g++.target/riscv/rvv/base/vmul_vx_tumu_rv64-3.C @@ -0,0 +1,292 @@ +/* { dg-do compile } */ +/* { dg-options "-march=rv64gcv -mabi=lp64d -O3 -fno-schedule-insns -fno-schedule-insns2" } */ + +#include "riscv_vector.h" + +vint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vint8mf8_t merge,vint8mf8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vint8mf4_t merge,vint8mf4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vint8mf2_t merge,vint8mf2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vint8m1_t merge,vint8m1_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vint8m2_t merge,vint8m2_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vint8m4_t merge,vint8m4_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vint8m8_t merge,vint8m8_t op1,int8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vint16mf4_t merge,vint16mf4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vint16mf2_t merge,vint16mf2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16m1_t test___riscv_vmul_tumu(vbool16_t mask,vint16m1_t merge,vint16m1_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); 
+} + + +vint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vint16m2_t merge,vint16m2_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vint16m4_t merge,vint16m4_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vint16m8_t merge,vint16m8_t op1,int16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vint32mf2_t merge,vint32mf2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vint32m1_t merge,vint32m1_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vint32m2_t merge,vint32m2_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vint32m4_t merge,vint32m4_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vint32m8_t merge,vint32m8_t op1,int32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vint64m1_t merge,vint64m1_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vint64m2_t merge,vint64m2_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vint64m4_t merge,vint64m4_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vint64m8_t merge,vint64m8_t op1,int64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf8_t test___riscv_vmul_tumu(vbool64_t mask,vuint8mf8_t merge,vuint8mf8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf4_t test___riscv_vmul_tumu(vbool32_t mask,vuint8mf4_t merge,vuint8mf4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8mf2_t test___riscv_vmul_tumu(vbool16_t mask,vuint8mf2_t merge,vuint8mf2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8m1_t test___riscv_vmul_tumu(vbool8_t mask,vuint8m1_t merge,vuint8m1_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8m2_t test___riscv_vmul_tumu(vbool4_t mask,vuint8m2_t merge,vuint8m2_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8m4_t test___riscv_vmul_tumu(vbool2_t mask,vuint8m4_t merge,vuint8m4_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint8m8_t test___riscv_vmul_tumu(vbool1_t mask,vuint8m8_t merge,vuint8m8_t op1,uint8_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16mf4_t test___riscv_vmul_tumu(vbool64_t mask,vuint16mf4_t merge,vuint16mf4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16mf2_t test___riscv_vmul_tumu(vbool32_t mask,vuint16mf2_t merge,vuint16mf2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16m1_t test___riscv_vmul_tumu(vbool16_t 
mask,vuint16m1_t merge,vuint16m1_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16m2_t test___riscv_vmul_tumu(vbool8_t mask,vuint16m2_t merge,vuint16m2_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16m4_t test___riscv_vmul_tumu(vbool4_t mask,vuint16m4_t merge,vuint16m4_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint16m8_t test___riscv_vmul_tumu(vbool2_t mask,vuint16m8_t merge,vuint16m8_t op1,uint16_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32mf2_t test___riscv_vmul_tumu(vbool64_t mask,vuint32mf2_t merge,vuint32mf2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32m1_t test___riscv_vmul_tumu(vbool32_t mask,vuint32m1_t merge,vuint32m1_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32m2_t test___riscv_vmul_tumu(vbool16_t mask,vuint32m2_t merge,vuint32m2_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32m4_t test___riscv_vmul_tumu(vbool8_t mask,vuint32m4_t merge,vuint32m4_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint32m8_t test___riscv_vmul_tumu(vbool4_t mask,vuint32m8_t merge,vuint32m8_t op1,uint32_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint64m1_t test___riscv_vmul_tumu(vbool64_t mask,vuint64m1_t merge,vuint64m1_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint64m2_t test___riscv_vmul_tumu(vbool32_t mask,vuint64m2_t merge,vuint64m2_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint64m4_t test___riscv_vmul_tumu(vbool16_t mask,vuint64m4_t merge,vuint64m4_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + +vuint64m8_t test___riscv_vmul_tumu(vbool8_t mask,vuint64m8_t merge,vuint64m8_t op1,uint64_t op2,size_t vl) +{ + return __riscv_vmul_tumu(mask,merge,op1,op2,32); +} + + + +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e8,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times 
{vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e16,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*mf2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e32,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m1,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m2,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m4,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ +/* { dg-final { scan-assembler-times {vsetvli\s+zero,\s*[a-x0-9]+,\s*e64,\s*m8,\s*tu,\s*mu\s+vmul\.vx\s+v[0-9]+,\s*v[0-9]+,\s*[a-x0-9]+,\s*v0.t\s+} 2 } } */ -- 2.7.4
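
A minimal usage sketch, not part of the patch, showing what the _tumu variant exercised above does. The function name scale_masked and the choice of the int32_t/m1 element type are illustrative only; the intrinsic signature matches the calls in the tests:

#include <riscv_vector.h>
#include <stdint.h>
#include <stddef.h>

/* Multiply the active elements of op1 by the scalar op2.  Under the tumu
   (tail-undisturbed, mask-undisturbed) policy, elements whose mask bit is
   clear and tail elements past vl keep the value of the merge operand.  */
vint32m1_t scale_masked (vbool32_t mask, vint32m1_t merge,
                         vint32m1_t op1, int32_t op2, size_t vl)
{
  return __riscv_vmul_tumu (mask, merge, op1, op2, vl);
}

The "-3" tests above hard-code a vector length of 32; because the immediate AVL field of vsetivli is only 5 bits wide (maximum 31), the expected lowering uses vsetvli with a scalar register, which is what the scan-assembler patterns check for.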