// Assembler aliases for the Zbp shuffle ("zip"/"unzip") and generalized
// OR-combine ("orc") pseudo-instructions. The binary immediate is the
// control value encoded into the underlying [UN]SHFLI / GORCI instruction.
def : InstAlias<"unzip4 $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b1100)>;
def : InstAlias<"zip2 $rd, $rs", (SHFLI GPR:$rd, GPR:$rs, 0b1110)>;
def : InstAlias<"unzip2 $rd, $rs", (UNSHFLI GPR:$rd, GPR:$rs, 0b1110)>;
+// zip and unzip are considered instructions rather than an alias.
def : InstAlias<"orc16 $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b10000)>;
def : InstAlias<"orc8 $rd, $rs", (GORCI GPR:$rd, GPR:$rs, 0b11000)>;
// Select the immediate forms of the shuffle/grev/gorc target nodes.
def : PatGprImm<riscv_unshfl, UNSHFLI, shfl_uimm>;
def : PatGprImm<riscv_grev, GREVI, uimmlog2xlen>;
def : PatGprImm<riscv_gorc, GORCI, uimmlog2xlen>;
+
+// We treat brev8 as a separate instruction, so match it directly.
+def : Pat<(riscv_grev GPR:$rs1, 7), (BREV8 GPR:$rs1)>;
} // Predicates = [HasStdExtZbp]
// All patterns in this region select RV32-only instructions (REV8_RV32,
// ZIP_RV32, UNZIP_RV32) on i32 values, so the guard must be IsRV32 — the
// original opener said IsRV64 and was missing the '{' matched by the brace
// below, contradicting the closing comment.
let Predicates = [HasStdExtZbp, IsRV32] in {
// We treat rev8 as a separate instruction, so match it directly
// (grev control value 24 = 0b11000 is rev8 on RV32).
def : Pat<(i32 (riscv_grev GPR:$rs1, 24)), (REV8_RV32 GPR:$rs1)>;

// We treat zip and unzip as separate instructions, so match them directly
// (shuffle control value 15 = 0b1111 is the full zip/unzip on RV32).
def : Pat<(i32 (riscv_shfl GPR:$rs1, 15)), (ZIP_RV32 GPR:$rs1)>;
def : Pat<(i32 (riscv_unshfl GPR:$rs1, 15)), (UNZIP_RV32 GPR:$rs1)>;
} // Predicates = [HasStdExtZbp, IsRV32]
let Predicates = [HasStdExtZbp, IsRV64] in {
ret i32 %tmp
}
; Check that the shfl intrinsic with the full control immediate (15) is
; selected to the standalone zip instruction on RV32.
+define i32 @zipi32(i32 %a) nounwind {
+; RV32ZBP-LABEL: zipi32:
+; RV32ZBP: # %bb.0:
+; RV32ZBP-NEXT: zip a0, a0
+; RV32ZBP-NEXT: ret
+ %tmp = call i32 @llvm.riscv.shfl.i32(i32 %a, i32 15)
+ ret i32 %tmp
+}
+
declare i32 @llvm.riscv.unshfl.i32(i32 %a, i32 %b)
define i32 @unshfl32(i32 %a, i32 %b) nounwind {
ret i32 %tmp
}
; Check that the unshfl intrinsic with the full control immediate (15) is
; selected to the standalone unzip instruction on RV32.
+define i32 @unzipi32(i32 %a) nounwind {
+; RV32ZBP-LABEL: unzipi32:
+; RV32ZBP: # %bb.0:
+; RV32ZBP-NEXT: unzip a0, a0
+; RV32ZBP-NEXT: ret
+ %tmp = call i32 @llvm.riscv.unshfl.i32(i32 %a, i32 15)
+ ret i32 %tmp
+}
+
declare i32 @llvm.riscv.xperm.n.i32(i32 %a, i32 %b)
define i32 @xpermn32(i32 %a, i32 %b) nounwind {
;
; RV32ZBP-LABEL: grev7_i32:
; RV32ZBP: # %bb.0:
-; RV32ZBP-NEXT: grevi a0, a0, 7
+; RV32ZBP-NEXT: rev.b a0, a0
; RV32ZBP-NEXT: ret
%and1 = shl i32 %a, 1
%shl1 = and i32 %and1, -1431655766
;
; RV32ZBP-LABEL: grev7_i64:
; RV32ZBP: # %bb.0:
-; RV32ZBP-NEXT: grevi a0, a0, 7
-; RV32ZBP-NEXT: grevi a1, a1, 7
+; RV32ZBP-NEXT: rev.b a0, a0
+; RV32ZBP-NEXT: rev.b a1, a1
; RV32ZBP-NEXT: ret
%and1 = shl i64 %a, 1
%shl1 = and i64 %and1, -6148914691236517206
;
; RV32ZBP-LABEL: bitreverse_i8:
; RV32ZBP: # %bb.0:
-; RV32ZBP-NEXT: grevi a0, a0, 7
+; RV32ZBP-NEXT: rev.b a0, a0
; RV32ZBP-NEXT: ret
%1 = tail call i8 @llvm.bitreverse.i8(i8 %a)
ret i8 %1
;
; RV32ZBP-LABEL: bitreverse_bswap_i32:
; RV32ZBP: # %bb.0:
-; RV32ZBP-NEXT: grevi a0, a0, 7
+; RV32ZBP-NEXT: rev.b a0, a0
; RV32ZBP-NEXT: ret
%1 = call i32 @llvm.bitreverse.i32(i32 %a)
%2 = call i32 @llvm.bswap.i32(i32 %1)
;
; RV32ZBP-LABEL: bitreverse_bswap_i64:
; RV32ZBP: # %bb.0:
-; RV32ZBP-NEXT: grevi a0, a0, 7
-; RV32ZBP-NEXT: grevi a1, a1, 7
+; RV32ZBP-NEXT: rev.b a0, a0
+; RV32ZBP-NEXT: rev.b a1, a1
; RV32ZBP-NEXT: ret
%1 = call i64 @llvm.bitreverse.i64(i64 %a)
%2 = call i64 @llvm.bswap.i64(i64 %1)
;
; RV64ZBP-LABEL: grev7_i64:
; RV64ZBP: # %bb.0:
-; RV64ZBP-NEXT: grevi a0, a0, 7
+; RV64ZBP-NEXT: rev.b a0, a0
; RV64ZBP-NEXT: ret
%and1 = shl i64 %a, 1
%shl1 = and i64 %and1, -6148914691236517206
;
; RV64ZBP-LABEL: bitreverse_bswap_i64:
; RV64ZBP: # %bb.0:
-; RV64ZBP-NEXT: grevi a0, a0, 7
+; RV64ZBP-NEXT: rev.b a0, a0
; RV64ZBP-NEXT: ret
%1 = call i64 @llvm.bitreverse.i64(i64 %a)
%2 = call i64 @llvm.bswap.i64(i64 %1)