(COPY_TO_REGCLASS (MOVSDrm addr:$src), VR128)>;
}
-// Aliases to help the assembler pick two byte VEX encodings by swapping the
-// operands relative to the normal instructions to use VEX.R instead of VEX.B.
-def : InstAlias<"vmovss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- (VMOVSSrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
-def : InstAlias<"vmovsd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
- (VMOVSDrr_REV VR128L:$dst, VR128:$src1, VR128H:$src2), 0>;
-
//===----------------------------------------------------------------------===//
// SSE 1 & 2 - Move Aligned/Unaligned FP Instructions
//===----------------------------------------------------------------------===//
} // SchedRW
} // Predicate
-// Aliases to help the assembler pick two byte VEX encodings by swapping the
-// operands relative to the normal instructions to use VEX.R instead of VEX.B.
-def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
- (VMOVAPSrr_REV VR128L:$dst, VR128H:$src), 0>;
-def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
- (VMOVAPDrr_REV VR128L:$dst, VR128H:$src), 0>;
-def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
- (VMOVUPSrr_REV VR128L:$dst, VR128H:$src), 0>;
-def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
- (VMOVUPDrr_REV VR128L:$dst, VR128H:$src), 0>;
-def : InstAlias<"vmovaps\t{$src, $dst|$dst, $src}",
- (VMOVAPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
-def : InstAlias<"vmovapd\t{$src, $dst|$dst, $src}",
- (VMOVAPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
-def : InstAlias<"vmovups\t{$src, $dst|$dst, $src}",
- (VMOVUPSYrr_REV VR256L:$dst, VR256H:$src), 0>;
-def : InstAlias<"vmovupd\t{$src, $dst|$dst, $src}",
- (VMOVUPDYrr_REV VR256L:$dst, VR256H:$src), 0>;
-
// Reversed version with ".s" suffix for GAS compatibility.
def : InstAlias<"vmovaps.s\t{$src, $dst|$dst, $src}",
(VMOVAPSrr_REV VR128:$dst, VR128:$src), 0>;
} // ExeDomain = SSEPackedInt
-// Aliases to help the assembler pick two byte VEX encodings by swapping the
-// operands relative to the normal instructions to use VEX.R instead of VEX.B.
-def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
- (VMOVDQArr_REV VR128L:$dst, VR128H:$src), 0>;
-def : InstAlias<"vmovdqa\t{$src, $dst|$dst, $src}",
- (VMOVDQAYrr_REV VR256L:$dst, VR256H:$src), 0>;
-def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
- (VMOVDQUrr_REV VR128L:$dst, VR128H:$src), 0>;
-def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
- (VMOVDQUYrr_REV VR256L:$dst, VR256H:$src), 0>;
-
// Reversed version with ".s" suffix for GAS compatibility.
def : InstAlias<"vmovdqa.s\t{$src, $dst|$dst, $src}",
(VMOVDQArr_REV VR128:$dst, VR128:$src), 0>;
"movq\t{$src, $dst|$dst, $src}", []>;
}
-// Aliases to help the assembler pick two byte VEX encodings by swapping the
-// operands relative to the normal instructions to use VEX.R instead of VEX.B.
-def : InstAlias<"vmovq\t{$src, $dst|$dst, $src}",
- (VMOVPQI2QIrr VR128L:$dst, VR128H:$src), 0>;
-
def : InstAlias<"vmovq.s\t{$src, $dst|$dst, $src}",
(VMOVPQI2QIrr VR128:$dst, VR128:$src), 0>;
def : InstAlias<"movq.s\t{$src, $dst|$dst, $src}",
// CHECK: encoding: [0xc4,0x02,0x3d,0x91,0x14,0x4f]
vpgatherqd %xmm8, (%r15,%ymm9,2), %xmm10
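+
+// With the operand-swapping aliases removed from the .td files, the 2-byte
+// VEX encodings below must come from the encoder itself: when the only
+// extended register would land in ModRM.rm, it switches to the reversed (mr)
+// opcode so the register is covered by VEX.R instead of VEX.B. An explicit
+// {vex3} prefix forces the 3-byte form.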
+// CHECK: vmovq %xmm0, %xmm8
+// CHECK: encoding: [0xc5,0x7a,0x7e,0xc0]
+ vmovq %xmm0, %xmm8
+
+// CHECK: vmovq %xmm0, %xmm8
+// CHECK: encoding: [0xc4,0x61,0x7a,0x7e,0xc0]
+ {vex3} vmovq %xmm0, %xmm8
+
+// CHECK: vmovq %xmm8, %xmm0
+// CHECK: encoding: [0xc5,0x79,0xd6,0xc0]
+ vmovq %xmm8, %xmm0
+
+// CHECK: vmovq %xmm8, %xmm0
+// CHECK: encoding: [0xc4,0xc1,0x7a,0x7e,0xc0]
+ {vex3} vmovq %xmm8, %xmm0
+
+// CHECK: vmovdqa %xmm0, %xmm8
+// CHECK: encoding: [0xc5,0x79,0x6f,0xc0]
+ vmovdqa %xmm0, %xmm8
+
+// CHECK: vmovdqa %xmm0, %xmm8
+// CHECK: encoding: [0xc4,0x61,0x79,0x6f,0xc0]
+ {vex3} vmovdqa %xmm0, %xmm8
+
+// CHECK: vmovdqa %xmm8, %xmm0
+// CHECK: encoding: [0xc5,0x79,0x7f,0xc0]
+ vmovdqa %xmm8, %xmm0
+
+// CHECK: vmovdqa %xmm8, %xmm0
+// CHECK: encoding: [0xc4,0xc1,0x79,0x6f,0xc0]
+ {vex3} vmovdqa %xmm8, %xmm0
+
+// CHECK: vmovdqu %xmm0, %xmm8
+// CHECK: encoding: [0xc5,0x7a,0x6f,0xc0]
+ vmovdqu %xmm0, %xmm8
+
+// CHECK: vmovdqu %xmm0, %xmm8
+// CHECK: encoding: [0xc4,0x61,0x7a,0x6f,0xc0]
+ {vex3} vmovdqu %xmm0, %xmm8
+
+// CHECK: vmovdqu %xmm8, %xmm0
+// CHECK: encoding: [0xc5,0x7a,0x7f,0xc0]
+ vmovdqu %xmm8, %xmm0
+
+// CHECK: vmovdqu %xmm8, %xmm0
+// CHECK: encoding: [0xc4,0xc1,0x7a,0x6f,0xc0]
+ {vex3} vmovdqu %xmm8, %xmm0
+
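+// vmovaps/vmovups behave the same way: the load forms (0x28/0x10) keep the
+// 2-byte prefix when only the destination is extended, and the store forms
+// (0x29/0x11) are chosen when only the source is.
+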
// CHECK: vmovaps %xmm0, %xmm8
// CHECK: encoding: [0xc5,0x78,0x28,0xc0]
vmovaps %xmm0, %xmm8
+// CHECK: vmovaps %xmm0, %xmm8
+// CHECK: encoding: [0xc4,0x61,0x78,0x28,0xc0]
+ {vex3} vmovaps %xmm0, %xmm8
+
// CHECK: vmovaps %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x78,0x29,0xc0]
vmovaps %xmm8, %xmm0
+// CHECK: vmovaps %xmm8, %xmm0
+// CHECK: encoding: [0xc4,0xc1,0x78,0x28,0xc0]
+ {vex3} vmovaps %xmm8, %xmm0
+
// CHECK: vmovaps %ymm0, %ymm8
// CHECK: encoding: [0xc5,0x7c,0x28,0xc0]
vmovaps %ymm0, %ymm8
+// CHECK: vmovaps %ymm0, %ymm8
+// CHECK: encoding: [0xc4,0x61,0x7c,0x28,0xc0]
+ {vex3} vmovaps %ymm0, %ymm8
+
// CHECK: vmovaps %ymm8, %ymm0
// CHECK: encoding: [0xc5,0x7c,0x29,0xc0]
vmovaps %ymm8, %ymm0
+// CHECK: vmovaps %ymm8, %ymm0
+// CHECK: encoding: [0xc4,0xc1,0x7c,0x28,0xc0]
+ {vex3} vmovaps %ymm8, %ymm0
+
// CHECK: vmovups %xmm0, %xmm8
// CHECK: encoding: [0xc5,0x78,0x10,0xc0]
vmovups %xmm0, %xmm8
+// CHECK: vmovups %xmm0, %xmm8
+// CHECK: encoding: [0xc4,0x61,0x78,0x10,0xc0]
+ {vex3} vmovups %xmm0, %xmm8
+
// CHECK: vmovups %xmm8, %xmm0
// CHECK: encoding: [0xc5,0x78,0x11,0xc0]
vmovups %xmm8, %xmm0
+// CHECK: vmovups %xmm8, %xmm0
+// CHECK: encoding: [0xc4,0xc1,0x78,0x10,0xc0]
+ {vex3} vmovups %xmm8, %xmm0
+
// CHECK: vmovups %ymm0, %ymm8
// CHECK: encoding: [0xc5,0x7c,0x10,0xc0]
vmovups %ymm0, %ymm8
+// CHECK: vmovups %ymm0, %ymm8
+// CHECK: encoding: [0xc4,0x61,0x7c,0x10,0xc0]
+ {vex3} vmovups %ymm0, %ymm8
+
// CHECK: vmovups %ymm8, %ymm0
// CHECK: encoding: [0xc5,0x7c,0x11,0xc0]
vmovups %ymm8, %ymm0
+// CHECK: vmovups %ymm8, %ymm0
+// CHECK: encoding: [0xc4,0xc1,0x7c,0x10,0xc0]
+ {vex3} vmovups %ymm8, %ymm0
+
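+// For the three-operand scalar moves, an extended src1 is absorbed by
+// VEX.vvvv (the 0xc5,0xba and 0xc5,0xbb cases below), so only an extended
+// src2 in ModRM.rm triggers the switch to the 0x11 store form.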
// CHECK: vmovss %xmm0, %xmm0, %xmm8
// CHECK: encoding: [0xc5,0x7a,0x10,0xc0]
vmovss %xmm0, %xmm0, %xmm8
+// CHECK: vmovss %xmm0, %xmm0, %xmm8
+// CHECK: encoding: [0xc4,0x61,0x7a,0x10,0xc0]
+ {vex3} vmovss %xmm0, %xmm0, %xmm8
+
// CHECK: vmovss %xmm0, %xmm8, %xmm0
// CHECK: encoding: [0xc5,0xba,0x10,0xc0]
vmovss %xmm0, %xmm8, %xmm0
+// CHECK: vmovss %xmm0, %xmm8, %xmm0
+// CHECK: encoding: [0xc4,0xe1,0x3a,0x10,0xc0]
+ {vex3} vmovss %xmm0, %xmm8, %xmm0
+
// CHECK: vmovss %xmm8, %xmm0, %xmm0
// CHECK: encoding: [0xc5,0x7a,0x11,0xc0]
vmovss %xmm8, %xmm0, %xmm0
+// CHECK: vmovss %xmm8, %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xc1,0x7a,0x10,0xc0]
+ {vex3} vmovss %xmm8, %xmm0, %xmm0
+
// CHECK: vmovsd %xmm0, %xmm0, %xmm8
// CHECK: encoding: [0xc5,0x7b,0x10,0xc0]
vmovsd %xmm0, %xmm0, %xmm8
+// CHECK: vmovsd %xmm0, %xmm0, %xmm8
+// CHECK: encoding: [0xc4,0x61,0x7b,0x10,0xc0]
+ {vex3} vmovsd %xmm0, %xmm0, %xmm8
+
// CHECK: vmovsd %xmm0, %xmm8, %xmm0
// CHECK: encoding: [0xc5,0xbb,0x10,0xc0]
vmovsd %xmm0, %xmm8, %xmm0
+// CHECK: vmovsd %xmm0, %xmm8, %xmm0
+// CHECK: encoding: [0xc4,0xe1,0x3b,0x10,0xc0]
+ {vex3} vmovsd %xmm0, %xmm8, %xmm0
+
// CHECK: vmovsd %xmm8, %xmm0, %xmm0
// CHECK: encoding: [0xc5,0x7b,0x11,0xc0]
vmovsd %xmm8, %xmm0, %xmm0
+// CHECK: vmovsd %xmm8, %xmm0, %xmm0
+// CHECK: encoding: [0xc4,0xc1,0x7b,0x10,0xc0]
+ {vex3} vmovsd %xmm8, %xmm0, %xmm0
+
// CHECK: vpmaddwd %xmm3, %xmm2, %xmm1
// CHECK: encoding: [0xc5,0xe9,0xf5,0xcb]
vpmaddwd %xmm3, %xmm2, %xmm1