(S2_interleave DoubleRegs:$src1)>, Requires<[HasV5]>;
def: Pat<(int_hexagon_S2_deinterleave DoubleRegs:$src1),
(S2_deinterleave DoubleRegs:$src1)>, Requires<[HasV5]>;
-def: Pat<(int_hexagon_Y2_dcfetch IntRegs:$src1),
- (Y2_dcfetch IntRegs:$src1)>, Requires<[HasV5]>;
def: Pat<(int_hexagon_Y2_dczeroa IntRegs:$src1),
(Y2_dczeroa IntRegs:$src1)>, Requires<[HasV5]>;
def: Pat<(int_hexagon_Y2_dccleana IntRegs:$src1),
def: T_RR_pat<Y4_l2fetch, int_hexagon_Y4_l2fetch>;
def: T_RP_pat<Y5_l2fetch, int_hexagon_Y5_l2fetch>;
+// Select the Y2_dcfetch intrinsic as Y2_dcfetchbo with a zero immediate
+// offset, i.e. the base+offset form "dcfetch(Rt+#0)".
+def: Pat<(int_hexagon_Y2_dcfetch I32:$Rt), (Y2_dcfetchbo I32:$Rt, 0)>;
+
//
// Patterns for optimizing code generation for HVX.
--- /dev/null
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Check that intrinsic int_hexagon_Y2_dcfetch is mapped to Y2_dcfetchbo
+; (not Y2_dcfetch), i.e. that the prefetch is emitted in the base+offset
+; form with an explicit #0 offset.
+
+; CHECK: dcfetch(r0+#0)
+
+target triple = "hexagon"
+
+define void @fred(i8* %a0) #0 {
+ call void @llvm.hexagon.Y2.dcfetch(i8* %a0)
+ ret void
+}
+
+declare void @llvm.hexagon.Y2.dcfetch(i8*) #0
+
+attributes #0 = { nounwind }
+