If SETUNE isn't legal, UO can use the NOT of the SETO expansion.
Removes some complex isel patterns. Most of the test changes are
from using XORI instead of SEQZ.
Differential Revision: https://reviews.llvm.org/D92008
unsigned Opc = 0;
switch (CCCode) {
default: llvm_unreachable("Don't know how to expand this condition!");
+ case ISD::SETUO:
+ if (TLI.isCondCodeLegal(ISD::SETUNE, OpVT)) {
+ CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR;
+ break;
+ }
+ assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT) &&
+ "If SETUO is expanded, SETOEQ or SETUNE must be legal!");
+ NeedInvert = true;
+ LLVM_FALLTHROUGH;
case ISD::SETO:
assert(TLI.isCondCodeLegal(ISD::SETOEQ, OpVT)
&& "If SETO is expanded, SETOEQ must be legal!");
CC1 = ISD::SETOEQ; CC2 = ISD::SETOEQ; Opc = ISD::AND; break;
- case ISD::SETUO:
- assert(TLI.isCondCodeLegal(ISD::SETUNE, OpVT)
- && "If SETUO is expanded, SETUNE must be legal!");
- CC1 = ISD::SETUNE; CC2 = ISD::SETUNE; Opc = ISD::OR; break;
case ISD::SETOEQ:
case ISD::SETOGT:
case ISD::SETOGE:
ISD::CondCode FPCCToExpand[] = {
ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
- ISD::SETGE, ISD::SETNE};
+ ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO};
ISD::NodeType FPOpToExpand[] = {
ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
def : PatFpr64Fpr64<setle, FLE_D>;
def : PatFpr64Fpr64<setole, FLE_D>;
-// Define pattern expansions for setcc operations which aren't directly
-// handled by a RISC-V instruction and aren't expanded in the SelectionDAG
-// Legalizer.
-
-def : Pat<(seto FPR64:$rs1, FPR64:$rs2),
- (AND (FEQ_D FPR64:$rs1, FPR64:$rs1),
- (FEQ_D FPR64:$rs2, FPR64:$rs2))>;
-def : Pat<(seto FPR64:$rs1, FPR64:$rs1),
- (FEQ_D $rs1, $rs1)>;
-
-def : Pat<(setuo FPR64:$rs1, FPR64:$rs2),
- (SLTIU (AND (FEQ_D FPR64:$rs1, FPR64:$rs1),
- (FEQ_D FPR64:$rs2, FPR64:$rs2)),
- 1)>;
-def : Pat<(setuo FPR64:$rs1, FPR64:$rs1),
- (SLTIU (FEQ_D $rs1, $rs1), 1)>;
-
def Select_FPR64_Using_CC_GPR : SelectCC_rrirr<FPR64, GPR>;
/// Loads
def : PatFpr32Fpr32<setle, FLE_S>;
def : PatFpr32Fpr32<setole, FLE_S>;
-// Define pattern expansions for setcc operations which aren't directly
-// handled by a RISC-V instruction and aren't expanded in the SelectionDAG
-// Legalizer.
-
-def : Pat<(seto FPR32:$rs1, FPR32:$rs2),
- (AND (FEQ_S FPR32:$rs1, FPR32:$rs1),
- (FEQ_S FPR32:$rs2, FPR32:$rs2))>;
-def : Pat<(seto FPR32:$rs1, FPR32:$rs1),
- (FEQ_S $rs1, $rs1)>;
-
-def : Pat<(setuo FPR32:$rs1, FPR32:$rs2),
- (SLTIU (AND (FEQ_S FPR32:$rs1, FPR32:$rs1),
- (FEQ_S FPR32:$rs2, FPR32:$rs2)),
- 1)>;
-def : Pat<(setuo FPR32:$rs1, FPR32:$rs1),
- (SLTIU (FEQ_S $rs1, $rs1), 1)>;
-
def Select_FPR32_Using_CC_GPR : SelectCC_rrirr<FPR32, GPR>;
/// Loads
def : PatFpr16Fpr16<setle, FLE_H>;
def : PatFpr16Fpr16<setole, FLE_H>;
-// Define pattern expansions for setcc operations which aren't directly
-// handled by a RISC-V instruction and aren't expanded in the SelectionDAG
-// Legalizer.
-
-def : Pat<(seto FPR16:$rs1, FPR16:$rs2),
- (AND (FEQ_H FPR16:$rs1, FPR16:$rs1),
- (FEQ_H FPR16:$rs2, FPR16:$rs2))>;
-def : Pat<(seto FPR16:$rs1, FPR16:$rs1),
- (FEQ_H $rs1, $rs1)>;
-
-def : Pat<(setuo FPR16:$rs1, FPR16:$rs2),
- (SLTIU (AND (FEQ_H FPR16:$rs1, FPR16:$rs1),
- (FEQ_H FPR16:$rs2, FPR16:$rs2)),
- 1)>;
-def : Pat<(setuo FPR16:$rs1, FPR16:$rs1),
- (SLTIU (FEQ_H $rs1, $rs1), 1)>;
-
def Select_FPR16_Using_CC_GPR : SelectCC_rrirr<FPR16, GPR>;
/// Loads
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: feq.d a2, ft1, ft1
; RV32IFD-NEXT: and a1, a2, a1
-; RV32IFD-NEXT: seqz a1, a1
+; RV32IFD-NEXT: xori a1, a1, 1
; RV32IFD-NEXT: or a0, a0, a1
; RV32IFD-NEXT: bnez a0, .LBB9_2
; RV32IFD-NEXT: # %bb.1: # %if.else
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: feq.d a2, ft1, ft1
; RV64IFD-NEXT: and a1, a2, a1
-; RV64IFD-NEXT: seqz a1, a1
+; RV64IFD-NEXT: xori a1, a1, 1
; RV64IFD-NEXT: or a0, a0, a1
; RV64IFD-NEXT: bnez a0, .LBB9_2
; RV64IFD-NEXT: # %bb.1: # %if.else
; RV32IFD-NEXT: feq.d a0, ft1, ft1
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: and a0, a1, a0
-; RV32IFD-NEXT: seqz a0, a0
-; RV32IFD-NEXT: bnez a0, .LBB15_2
+; RV32IFD-NEXT: addi a1, zero, 1
+; RV32IFD-NEXT: bne a0, a1, .LBB15_2
; RV32IFD-NEXT: # %bb.1: # %if.else
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: feq.d a0, ft1, ft1
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: and a0, a1, a0
-; RV64IFD-NEXT: seqz a0, a0
-; RV64IFD-NEXT: bnez a0, .LBB15_2
+; RV64IFD-NEXT: addi a1, zero, 1
+; RV64IFD-NEXT: bne a0, a1, .LBB15_2
; RV64IFD-NEXT: # %bb.1: # %if.else
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: feq.d a2, ft1, ft1
; RV32IFD-NEXT: and a1, a2, a1
-; RV32IFD-NEXT: seqz a1, a1
+; RV32IFD-NEXT: xori a1, a1, 1
; RV32IFD-NEXT: or a0, a0, a1
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: feq.d a2, ft1, ft1
; RV64IFD-NEXT: and a1, a2, a1
-; RV64IFD-NEXT: seqz a1, a1
+; RV64IFD-NEXT: xori a1, a1, 1
; RV64IFD-NEXT: or a0, a0, a1
; RV64IFD-NEXT: ret
%1 = fcmp ueq double %a, %b
; RV32IFD-NEXT: feq.d a0, ft1, ft1
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: and a0, a1, a0
-; RV32IFD-NEXT: seqz a0, a0
+; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-NEXT: feq.d a0, ft1, ft1
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: and a0, a1, a0
-; RV64IFD-NEXT: seqz a0, a0
+; RV64IFD-NEXT: xori a0, a0, 1
; RV64IFD-NEXT: ret
%1 = fcmp uno double %a, %b
%2 = zext i1 %1 to i32
; RV32IFD-LABEL: double_is_nan:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: feq.d a0, fa0, fa0
-; RV32IFD-NEXT: seqz a0, a0
+; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: double_is_nan:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: feq.d a0, fa0, fa0
-; RV64IFD-NEXT: seqz a0, a0
+; RV64IFD-NEXT: xori a0, a0, 1
; RV64IFD-NEXT: ret
%1 = fcmp uno double %a, 0.000000e+00
ret i1 %1
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: feq.d a2, ft1, ft1
; RV32IFD-NEXT: and a1, a2, a1
-; RV32IFD-NEXT: seqz a1, a1
+; RV32IFD-NEXT: xori a1, a1, 1
; RV32IFD-NEXT: or a0, a0, a1
; RV32IFD-NEXT: bnez a0, .LBB8_2
; RV32IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: feq.d a1, ft1, ft1
; RV64IFD-NEXT: feq.d a2, ft0, ft0
; RV64IFD-NEXT: and a1, a2, a1
-; RV64IFD-NEXT: seqz a1, a1
+; RV64IFD-NEXT: xori a1, a1, 1
; RV64IFD-NEXT: or a0, a0, a1
; RV64IFD-NEXT: bnez a0, .LBB8_2
; RV64IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: feq.d a0, ft1, ft1
; RV32IFD-NEXT: feq.d a1, ft0, ft0
; RV32IFD-NEXT: and a0, a1, a0
-; RV32IFD-NEXT: seqz a0, a0
+; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: bnez a0, .LBB14_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: fmv.d ft0, ft1
; RV64IFD-NEXT: feq.d a0, ft1, ft1
; RV64IFD-NEXT: feq.d a1, ft0, ft0
; RV64IFD-NEXT: and a0, a1, a0
-; RV64IFD-NEXT: seqz a0, a0
+; RV64IFD-NEXT: xori a0, a0, 1
; RV64IFD-NEXT: bnez a0, .LBB14_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fmv.d ft0, ft1
; RV32IF-NEXT: feq.s a1, ft0, ft0
; RV32IF-NEXT: feq.s a2, ft1, ft1
; RV32IF-NEXT: and a1, a2, a1
-; RV32IF-NEXT: seqz a1, a1
+; RV32IF-NEXT: xori a1, a1, 1
; RV32IF-NEXT: or a0, a0, a1
; RV32IF-NEXT: bnez a0, .LBB9_2
; RV32IF-NEXT: # %bb.1: # %if.else
; RV64IF-NEXT: feq.s a1, ft0, ft0
; RV64IF-NEXT: feq.s a2, ft1, ft1
; RV64IF-NEXT: and a1, a2, a1
-; RV64IF-NEXT: seqz a1, a1
+; RV64IF-NEXT: xori a1, a1, 1
; RV64IF-NEXT: or a0, a0, a1
; RV64IF-NEXT: bnez a0, .LBB9_2
; RV64IF-NEXT: # %bb.1: # %if.else
; RV32IF-NEXT: feq.s a0, ft1, ft1
; RV32IF-NEXT: feq.s a1, ft0, ft0
; RV32IF-NEXT: and a0, a1, a0
-; RV32IF-NEXT: seqz a0, a0
-; RV32IF-NEXT: bnez a0, .LBB15_2
+; RV32IF-NEXT: addi a1, zero, 1
+; RV32IF-NEXT: bne a0, a1, .LBB15_2
; RV32IF-NEXT: # %bb.1: # %if.else
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: feq.s a0, ft1, ft1
; RV64IF-NEXT: feq.s a1, ft0, ft0
; RV64IF-NEXT: and a0, a1, a0
-; RV64IF-NEXT: seqz a0, a0
-; RV64IF-NEXT: bnez a0, .LBB15_2
+; RV64IF-NEXT: addi a1, zero, 1
+; RV64IF-NEXT: bne a0, a1, .LBB15_2
; RV64IF-NEXT: # %bb.1: # %if.else
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: feq.s a1, ft0, ft0
; RV32IF-NEXT: feq.s a2, ft1, ft1
; RV32IF-NEXT: and a1, a2, a1
-; RV32IF-NEXT: seqz a1, a1
+; RV32IF-NEXT: xori a1, a1, 1
; RV32IF-NEXT: or a0, a0, a1
; RV32IF-NEXT: ret
;
; RV64IF-NEXT: feq.s a1, ft0, ft0
; RV64IF-NEXT: feq.s a2, ft1, ft1
; RV64IF-NEXT: and a1, a2, a1
-; RV64IF-NEXT: seqz a1, a1
+; RV64IF-NEXT: xori a1, a1, 1
; RV64IF-NEXT: or a0, a0, a1
; RV64IF-NEXT: ret
%1 = fcmp ueq float %a, %b
; RV32IF-NEXT: feq.s a0, ft1, ft1
; RV32IF-NEXT: feq.s a1, ft0, ft0
; RV32IF-NEXT: and a0, a1, a0
-; RV32IF-NEXT: seqz a0, a0
+; RV32IF-NEXT: xori a0, a0, 1
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: fcmp_uno:
; RV64IF-NEXT: feq.s a0, ft1, ft1
; RV64IF-NEXT: feq.s a1, ft0, ft0
; RV64IF-NEXT: and a0, a1, a0
-; RV64IF-NEXT: seqz a0, a0
+; RV64IF-NEXT: xori a0, a0, 1
; RV64IF-NEXT: ret
%1 = fcmp uno float %a, %b
%2 = zext i1 %1 to i32
; RV32IF-LABEL: float_is_nan:
; RV32IF: # %bb.0:
; RV32IF-NEXT: feq.s a0, fa0, fa0
-; RV32IF-NEXT: seqz a0, a0
+; RV32IF-NEXT: xori a0, a0, 1
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: float_is_nan:
; RV64IF: # %bb.0:
; RV64IF-NEXT: feq.s a0, fa0, fa0
-; RV64IF-NEXT: seqz a0, a0
+; RV64IF-NEXT: xori a0, a0, 1
; RV64IF-NEXT: ret
%1 = fcmp uno float %a, 0.000000e+00
ret i1 %1
; RV32IF-NEXT: feq.s a1, ft1, ft1
; RV32IF-NEXT: feq.s a2, ft0, ft0
; RV32IF-NEXT: and a1, a2, a1
-; RV32IF-NEXT: seqz a1, a1
+; RV32IF-NEXT: xori a1, a1, 1
; RV32IF-NEXT: or a0, a0, a1
; RV32IF-NEXT: bnez a0, .LBB8_2
; RV32IF-NEXT: # %bb.1:
; RV64IF-NEXT: feq.s a1, ft1, ft1
; RV64IF-NEXT: feq.s a2, ft0, ft0
; RV64IF-NEXT: and a1, a2, a1
-; RV64IF-NEXT: seqz a1, a1
+; RV64IF-NEXT: xori a1, a1, 1
; RV64IF-NEXT: or a0, a0, a1
; RV64IF-NEXT: bnez a0, .LBB8_2
; RV64IF-NEXT: # %bb.1:
; RV32IF-NEXT: feq.s a0, ft1, ft1
; RV32IF-NEXT: feq.s a1, ft0, ft0
; RV32IF-NEXT: and a0, a1, a0
-; RV32IF-NEXT: seqz a0, a0
+; RV32IF-NEXT: xori a0, a0, 1
; RV32IF-NEXT: bnez a0, .LBB14_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: fmv.s ft0, ft1
; RV64IF-NEXT: feq.s a0, ft1, ft1
; RV64IF-NEXT: feq.s a1, ft0, ft0
; RV64IF-NEXT: and a0, a1, a0
-; RV64IF-NEXT: seqz a0, a0
+; RV64IF-NEXT: xori a0, a0, 1
; RV64IF-NEXT: bnez a0, .LBB14_2
; RV64IF-NEXT: # %bb.1:
; RV64IF-NEXT: fmv.s ft0, ft1
; RV32IZFH-NEXT: feq.h a1, fa1, fa1
; RV32IZFH-NEXT: feq.h a2, fa0, fa0
; RV32IZFH-NEXT: and a1, a2, a1
-; RV32IZFH-NEXT: seqz a1, a1
+; RV32IZFH-NEXT: xori a1, a1, 1
; RV32IZFH-NEXT: or a0, a0, a1
; RV32IZFH-NEXT: bnez a0, .LBB9_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
; RV64IZFH-NEXT: feq.h a1, fa1, fa1
; RV64IZFH-NEXT: feq.h a2, fa0, fa0
; RV64IZFH-NEXT: and a1, a2, a1
-; RV64IZFH-NEXT: seqz a1, a1
+; RV64IZFH-NEXT: xori a1, a1, 1
; RV64IZFH-NEXT: or a0, a0, a1
; RV64IZFH-NEXT: bnez a0, .LBB9_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
; RV32IZFH-NEXT: feq.h a0, fa1, fa1
; RV32IZFH-NEXT: feq.h a1, fa0, fa0
; RV32IZFH-NEXT: and a0, a1, a0
-; RV32IZFH-NEXT: seqz a0, a0
-; RV32IZFH-NEXT: bnez a0, .LBB15_2
+; RV32IZFH-NEXT: addi a1, zero, 1
+; RV32IZFH-NEXT: bne a0, a1, .LBB15_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: feq.h a0, fa1, fa1
; RV64IZFH-NEXT: feq.h a1, fa0, fa0
; RV64IZFH-NEXT: and a0, a1, a0
-; RV64IZFH-NEXT: seqz a0, a0
-; RV64IZFH-NEXT: bnez a0, .LBB15_2
+; RV64IZFH-NEXT: addi a1, zero, 1
+; RV64IZFH-NEXT: bne a0, a1, .LBB15_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: feq.h a1, fa1, fa1
; RV32IZFH-NEXT: feq.h a2, fa0, fa0
; RV32IZFH-NEXT: and a1, a2, a1
-; RV32IZFH-NEXT: seqz a1, a1
+; RV32IZFH-NEXT: xori a1, a1, 1
; RV32IZFH-NEXT: or a0, a0, a1
; RV32IZFH-NEXT: ret
;
; RV64IZFH-NEXT: feq.h a1, fa1, fa1
; RV64IZFH-NEXT: feq.h a2, fa0, fa0
; RV64IZFH-NEXT: and a1, a2, a1
-; RV64IZFH-NEXT: seqz a1, a1
+; RV64IZFH-NEXT: xori a1, a1, 1
; RV64IZFH-NEXT: or a0, a0, a1
; RV64IZFH-NEXT: ret
%1 = fcmp ueq half %a, %b
; RV32IZFH-NEXT: feq.h a0, fa1, fa1
; RV32IZFH-NEXT: feq.h a1, fa0, fa0
; RV32IZFH-NEXT: and a0, a1, a0
-; RV32IZFH-NEXT: seqz a0, a0
+; RV32IZFH-NEXT: xori a0, a0, 1
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: fcmp_uno:
; RV64IZFH-NEXT: feq.h a0, fa1, fa1
; RV64IZFH-NEXT: feq.h a1, fa0, fa0
; RV64IZFH-NEXT: and a0, a1, a0
-; RV64IZFH-NEXT: seqz a0, a0
+; RV64IZFH-NEXT: xori a0, a0, 1
; RV64IZFH-NEXT: ret
%1 = fcmp uno half %a, %b
%2 = zext i1 %1 to i32
; RV32IZFH-LABEL: half_is_nan:
; RV32IZFH: # %bb.0:
; RV32IZFH-NEXT: feq.h a0, fa0, fa0
-; RV32IZFH-NEXT: seqz a0, a0
+; RV32IZFH-NEXT: xori a0, a0, 1
; RV32IZFH-NEXT: ret
;
; RV64IZFH-LABEL: half_is_nan:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: feq.h a0, fa0, fa0
-; RV64IZFH-NEXT: seqz a0, a0
+; RV64IZFH-NEXT: xori a0, a0, 1
; RV64IZFH-NEXT: ret
%1 = fcmp uno half %a, 0.000000e+00
ret i1 %1
; RV32IZFH-NEXT: feq.h a1, fa1, fa1
; RV32IZFH-NEXT: feq.h a2, fa0, fa0
; RV32IZFH-NEXT: and a1, a2, a1
-; RV32IZFH-NEXT: seqz a1, a1
+; RV32IZFH-NEXT: xori a1, a1, 1
; RV32IZFH-NEXT: or a0, a0, a1
; RV32IZFH-NEXT: bnez a0, .LBB8_2
; RV32IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: feq.h a1, fa1, fa1
; RV64IZFH-NEXT: feq.h a2, fa0, fa0
; RV64IZFH-NEXT: and a1, a2, a1
-; RV64IZFH-NEXT: seqz a1, a1
+; RV64IZFH-NEXT: xori a1, a1, 1
; RV64IZFH-NEXT: or a0, a0, a1
; RV64IZFH-NEXT: bnez a0, .LBB8_2
; RV64IZFH-NEXT: # %bb.1:
; RV32IZFH-NEXT: feq.h a0, fa1, fa1
; RV32IZFH-NEXT: feq.h a1, fa0, fa0
; RV32IZFH-NEXT: and a0, a1, a0
-; RV32IZFH-NEXT: seqz a0, a0
+; RV32IZFH-NEXT: xori a0, a0, 1
; RV32IZFH-NEXT: bnez a0, .LBB14_2
; RV32IZFH-NEXT: # %bb.1:
; RV32IZFH-NEXT: fmv.h fa0, fa1
; RV64IZFH-NEXT: feq.h a0, fa1, fa1
; RV64IZFH-NEXT: feq.h a1, fa0, fa0
; RV64IZFH-NEXT: and a0, a1, a0
-; RV64IZFH-NEXT: seqz a0, a0
+; RV64IZFH-NEXT: xori a0, a0, 1
; RV64IZFH-NEXT: bnez a0, .LBB14_2
; RV64IZFH-NEXT: # %bb.1:
; RV64IZFH-NEXT: fmv.h fa0, fa1