; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
-; RUN: -verify-machineinstrs < %s | FileCheck %s
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
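+; The negative-index extract below now gets different code on RV32 and RV64,
+; so common assertions use the CHECK prefix and target-specific assertions
+; use the RV32 and RV64 prefixes.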
define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
; CHECK-LABEL: extractelt_nxv1f16_0:
  %r = extractelement <vscale x 1 x half> %v, i32 0
  ret half %r
}
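+
+; The nxv16f64 result occupies two LMUL=8 register groups (v8 and v16), so
+; the element is extracted through memory: both groups are spilled to a
+; 64-byte-aligned stack area and the requested element is loaded back with
+; fld.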
define double @extractelt_nxv16f64_neg1(<vscale x 16 x double> %v) {
+; RV32-LABEL: extractelt_nxv16f64_neg1:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -64
+; RV32-NEXT: .cfi_def_cfa_offset 64
+; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: .cfi_offset s0, -8
+; RV32-NEXT: addi s0, sp, 64
+; RV32-NEXT: .cfi_def_cfa s0, 0
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -64
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: vs8r.v v8, (a0)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 3
+; RV32-NEXT: add a2, a0, a2
+; RV32-NEXT: vs8r.v v16, (a2)
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: fld fa0, -8(a0)
+; RV32-NEXT: addi sp, s0, -64
+; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: ret
+;
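+; On RV64 the i32 -1 index is zero-extended to 0xffffffff and must be
+; clamped at run time to the last valid index, 2 * vlenb - 1, hence the
+; bltu below. On RV32 the clamp folds away at compile time and the code
+; loads the last element directly.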
+; RV64-LABEL: extractelt_nxv16f64_neg1:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -64
+; RV64-NEXT: .cfi_def_cfa_offset 64
+; RV64-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: .cfi_offset s0, -16
+; RV64-NEXT: addi s0, sp, 64
+; RV64-NEXT: .cfi_def_cfa s0, 0
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -64
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: vs8r.v v8, (a0)
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a1, a2, 3
+; RV64-NEXT: add a3, a0, a1
+; RV64-NEXT: li a1, -1
+; RV64-NEXT: srli a1, a1, 32
+; RV64-NEXT: slli a2, a2, 1
+; RV64-NEXT: addi a2, a2, -1
+; RV64-NEXT: vs8r.v v16, (a3)
+; RV64-NEXT: bltu a2, a1, .LBB52_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a2, a1
+; RV64-NEXT: .LBB52_2:
+; RV64-NEXT: slli a1, a2, 3
+; RV64-NEXT: add a0, a0, a1
+; RV64-NEXT: fld fa0, 0(a0)
+; RV64-NEXT: addi sp, s0, -64
+; RV64-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 64
+; RV64-NEXT: ret
%r = extractelement <vscale x 16 x double> %v, i32 -1
ret double %r
}
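+
+; For reference only, a sketch (not part of this test) of a well-defined way
+; to read the last element, computing the index with the llvm.vscale
+; intrinsic instead of relying on index clamping:
+;
+;   %vs = call i32 @llvm.vscale.i32()
+;   %n = shl i32 %vs, 4                 ; nxv16f64 has vscale * 16 elements
+;   %last = add i32 %n, -1
+;   %r = extractelement <vscale x 16 x double> %v, i32 %last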