From 671779456a90771adf5410cc24dbe60b92f335d9 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 15 Oct 2018 01:51:53 +0000
Subject: [PATCH] [X86] Add 128-bit MOVDDUP to the constant pool printing in
 X86AsmPrinter::EmitInstruction.

We use this instruction to broadcast a single 64-bit value to a
v2i64/v2f64 vector.

llvm-svn: 344486
---
 llvm/lib/Target/X86/X86MCInstLower.cpp             |  6 ++
 llvm/test/CodeGen/X86/avg.ll                       |  6 +-
 .../CodeGen/X86/bitcast-int-to-vector-bool-sext.ll |  3 +-
 .../CodeGen/X86/bitcast-int-to-vector-bool-zext.ll |  3 +-
 .../test/CodeGen/X86/bitcast-int-to-vector-bool.ll |  3 +-
 .../CodeGen/X86/broadcast-elm-cross-splat-vec.ll   | 72 ++++++++++++++--------
 llvm/test/CodeGen/X86/splat-for-size.ll            |  9 ++-
 llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll   |  3 +-
 8 files changed, 72 insertions(+), 33 deletions(-)

diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index acb2bc2..76f0dd4 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -2133,6 +2133,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
       }
     }
     break;
+  case X86::MOVDDUPrm:
+  case X86::VMOVDDUPrm:
+  case X86::VMOVDDUPZ128rm:
   case X86::VBROADCASTSSrm:
   case X86::VBROADCASTSSYrm:
   case X86::VBROADCASTSSZ128m:
@@ -2169,6 +2172,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
       int NumElts;
       switch (MI->getOpcode()) {
      default: llvm_unreachable("Invalid opcode");
+      case X86::MOVDDUPrm:         NumElts = 2; break;
+      case X86::VMOVDDUPrm:        NumElts = 2; break;
+      case X86::VMOVDDUPZ128rm:    NumElts = 2; break;
       case X86::VBROADCASTSSrm:    NumElts = 4; break;
       case X86::VBROADCASTSSYrm:   NumElts = 8; break;
       case X86::VBROADCASTSSZ128m: NumElts = 4; break;
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index e8a03fe..84f1296 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -1256,7 +1256,8 @@ define void @avg_v32i8_const(<32 x i8>* %a) nounwind {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm2 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX1-NEXT:    # xmm2 = mem[0,0]
 ; AVX1-NEXT:    vpavgb %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vpavgb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1310,7 +1311,8 @@ define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
 ; AVX1-NEXT:    vmovdqa (%rdi), %ymm0
 ; AVX1-NEXT:    vmovdqa 32(%rdi), %ymm1
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm3 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX1-NEXT:    # xmm3 = mem[0,0]
 ; AVX1-NEXT:    vpavgb %xmm3, %xmm2, %xmm2
 ; AVX1-NEXT:    vpavgb %xmm3, %xmm0, %xmm0
 ; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
index 74c48e3..c022d79 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-sext.ll
@@ -158,7 +158,8 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT:    # xmm1 = mem[0,0]
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
index 6cd52c4..75b5b70 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool-zext.ll
@@ -200,7 +200,8 @@ define <16 x i8> @ext_i16_16i8(i16 %a0) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT:    # xmm1 = mem[0,0]
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
index 1acc834..3deac92 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -163,7 +163,8 @@ define <16 x i1> @bitcast_i16_16i1(i16 zeroext %a0) {
 ; AVX1:       # %bb.0:
 ; AVX1-NEXT:    vmovd %edi, %xmm0
 ; AVX1-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
-; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [-1.7939930131212661E-307,-1.7939930131212661E-307]
+; AVX1-NEXT:    # xmm1 = mem[0,0]
 ; AVX1-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    vpsrlw $7, %xmm0, %xmm0
diff --git a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
index 90f6559..bb79efc 100644
--- a/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
+++ b/llvm/test/CodeGen/X86/broadcast-elm-cross-splat-vec.ll
@@ -87,21 +87,24 @@ define <16 x i8> @f16xi8_i32(<16 x i8> %a) {
 define <16 x i8> @f16xi8_i64(<16 x i8> %a) {
 ; AVX-LABEL: f16xi8_i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-NEXT:    # xmm1 = mem[0,0]
 ; AVX-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f16xi8_i64:
 ; ALL32:       # %bb.0:
-; ALL32-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; ALL32-NEXT:    vmovddup {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; ALL32-NEXT:    # xmm1 = mem[0,0]
 ; ALL32-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT:    retl
 ;
 ; AVX-64-LABEL: f16xi8_i64:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm1 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-64-NEXT:    # xmm1 = mem[0,0]
 ; AVX-64-NEXT:    vpaddb %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT:    retq
@@ -202,7 +205,8 @@ define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
 ; AVX-LABEL: f32xi8_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-NEXT:    # xmm2 = mem[0,0]
 ; AVX-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -219,7 +223,8 @@ define <32 x i8> @f32xi8_i64(<32 x i8> %a) {
 ; AVX-64-LABEL: f32xi8_i64:
 ; AVX-64:       # %bb.0:
 ; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm2 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-64-NEXT:    # xmm2 = mem[0,0]
 ; AVX-64-NEXT:    vpaddb %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT:    vpaddb %xmm2, %xmm0, %xmm0
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -424,7 +429,8 @@ define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
 ; AVX-LABEL: f64xi8_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-NEXT:    # xmm3 = mem[0,0]
 ; AVX-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
 ; AVX-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -456,7 +462,8 @@ define <64 x i8> @f64xi8_i64(<64 x i8> %a) {
 ; AVX-64-LABEL: f64xi8_i64:
 ; AVX-64:       # %bb.0:
 ; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = [7.9499288951273625E-275,7.9499288951273625E-275]
+; AVX-64-NEXT:    # xmm3 = mem[0,0]
 ; AVX-64-NEXT:    vpaddb %xmm3, %xmm2, %xmm2
 ; AVX-64-NEXT:    vpaddb %xmm3, %xmm1, %xmm1
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -675,21 +682,24 @@ define <8 x i16> @f8xi16_i32(<8 x i16> %a) {
 define <8 x i16> @f8xi16_i64(<8 x i16> %a) {
 ; AVX-LABEL: f8xi16_i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-NEXT:    # xmm1 = mem[0,0]
 ; AVX-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f8xi16_i64:
 ; ALL32:       # %bb.0:
-; ALL32-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; ALL32-NEXT:    vmovddup {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; ALL32-NEXT:    # xmm1 = mem[0,0]
 ; ALL32-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT:    retl
 ;
 ; AVX-64-LABEL: f8xi16_i64:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm1 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-64-NEXT:    # xmm1 = mem[0,0]
 ; AVX-64-NEXT:    vpaddw %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT:    retq
@@ -750,7 +760,8 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
 ; AVX-LABEL: f16xi16_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-NEXT:    # xmm2 = mem[0,0]
 ; AVX-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -767,7 +778,8 @@ define <16 x i16> @f16xi16_i64(<16 x i16> %a) {
 ; AVX-64-LABEL: f16xi16_i64:
 ; AVX-64:       # %bb.0:
 ; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm2 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-64-NEXT:    # xmm2 = mem[0,0]
 ; AVX-64-NEXT:    vpaddw %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT:    vpaddw %xmm2, %xmm0, %xmm0
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -902,7 +914,8 @@ define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
 ; AVX-LABEL: f32xi16_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-NEXT:    # xmm3 = mem[0,0]
 ; AVX-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
 ; AVX-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -934,7 +947,8 @@ define <32 x i16> @f32xi16_i64(<32 x i16> %a) {
 ; AVX-64-LABEL: f32xi16_i64:
 ; AVX-64:       # %bb.0:
 ; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = [4.1720559249406128E-309,4.1720559249406128E-309]
+; AVX-64-NEXT:    # xmm3 = mem[0,0]
 ; AVX-64-NEXT:    vpaddw %xmm3, %xmm2, %xmm2
 ; AVX-64-NEXT:    vpaddw %xmm3, %xmm1, %xmm1
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1120,21 +1134,24 @@ define <32 x i16> @f32xi16_i256(<32 x i16> %a) {
 define <4 x i32> @f4xi32_i64(<4 x i32> %a) {
 ; AVX-LABEL: f4xi32_i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-NEXT:    # xmm1 = mem[0,0]
 ; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f4xi32_i64:
 ; ALL32:       # %bb.0:
-; ALL32-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; ALL32-NEXT:    vmovddup {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; ALL32-NEXT:    # xmm1 = mem[0,0]
 ; ALL32-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT:    retl
 ;
 ; AVX-64-LABEL: f4xi32_i64:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm1 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-64-NEXT:    # xmm1 = mem[0,0]
 ; AVX-64-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT:    vpand %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT:    retq
@@ -1155,7 +1172,8 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
 ; AVX-LABEL: f8xi32_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-NEXT:    # xmm2 = mem[0,0]
 ; AVX-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1172,7 +1190,8 @@ define <8 x i32> @f8xi32_i64(<8 x i32> %a) {
 ; AVX-64-LABEL: f8xi32_i64:
 ; AVX-64:       # %bb.0:
 ; AVX-64-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm2 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-64-NEXT:    # xmm2 = mem[0,0]
 ; AVX-64-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
 ; AVX-64-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
@@ -1237,7 +1256,8 @@ define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
 ; AVX-LABEL: f16xi32_i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm3 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-NEXT:    # xmm3 = mem[0,0]
 ; AVX-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
 ; AVX-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
 ; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1269,7 +1289,8 @@ define <16 x i32> @f16xi32_i64(<16 x i32> %a) {
 ; AVX-64-LABEL: f16xi32_i64:
 ; AVX-64:       # %bb.0:
 ; AVX-64-NEXT:    vextractf128 $1, %ymm1, %xmm2
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm3 = [2.1219957909652723E-314,2.1219957909652723E-314]
+; AVX-64-NEXT:    # xmm3 = mem[0,0]
 ; AVX-64-NEXT:    vpaddd %xmm3, %xmm2, %xmm2
 ; AVX-64-NEXT:    vpaddd %xmm3, %xmm1, %xmm1
 ; AVX-64-NEXT:    vinsertf128 $1, %xmm2, %ymm1, %ymm1
@@ -1573,21 +1594,24 @@ define <8 x i64> @f8xi64_i256(<8 x i64> %a) {
 define <4 x float> @f4xf32_f64(<4 x float> %a) {
 ; AVX-LABEL: f4xf32_f64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [0.0078125018626451492,0.0078125018626451492]
+; AVX-NEXT:    # xmm1 = mem[0,0]
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vdivps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retl
 ;
 ; ALL32-LABEL: f4xf32_f64:
 ; ALL32:       # %bb.0:
-; ALL32-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; ALL32-NEXT:    vmovddup {{.*#+}} xmm1 = [0.0078125018626451492,0.0078125018626451492]
+; ALL32-NEXT:    # xmm1 = mem[0,0]
 ; ALL32-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; ALL32-NEXT:    vdivps %xmm0, %xmm1, %xmm0
 ; ALL32-NEXT:    retl
 ;
 ; AVX-64-LABEL: f4xf32_f64:
 ; AVX-64:       # %bb.0:
-; AVX-64-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-64-NEXT:    vmovddup {{.*#+}} xmm1 = [0.0078125018626451492,0.0078125018626451492]
+; AVX-64-NEXT:    # xmm1 = mem[0,0]
 ; AVX-64-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX-64-NEXT:    vdivps %xmm0, %xmm1, %xmm0
 ; AVX-64-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/splat-for-size.ll b/llvm/test/CodeGen/X86/splat-for-size.ll
index 99ed8e8..7567dbc 100644
--- a/llvm/test/CodeGen/X86/splat-for-size.ll
+++ b/llvm/test/CodeGen/X86/splat-for-size.ll
@@ -9,7 +9,8 @@
 define <2 x double> @splat_v2f64(<2 x double> %x) #0 {
 ; CHECK-LABEL: splat_v2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = [1,1]
+; CHECK-NEXT:    # xmm1 = mem[0,0]
 ; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT:    retq
   %add = fadd <2 x double> %x, <double 1.0, double 1.0>
@@ -51,7 +52,8 @@ define <8 x float> @splat_v8f32(<8 x float> %x) #1 {
 define <2 x i64> @splat_v2i64(<2 x i64> %x) #1 {
 ; AVX-LABEL: splat_v2i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [2,2]
+; AVX-NEXT:    # xmm1 = mem[0,0]
 ; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
 ;
@@ -70,7 +72,8 @@ define <4 x i64> @splat_v4i64(<4 x i64> %x) #0 {
 ; AVX-LABEL: splat_v4i64:
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = mem[0,0]
+; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = [2,2]
+; AVX-NEXT:    # xmm2 = mem[0,0]
 ; AVX-NEXT:    vpaddq %xmm2, %xmm1, %xmm1
 ; AVX-NEXT:    vpaddq %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll b/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll
index 27541c4..8238538 100644
--- a/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll
+++ b/llvm/test/CodeGen/X86/urem-seteq-vec-nonsplat.ll
@@ -629,7 +629,8 @@ define <4 x i32> @test_urem_both(<4 x i32> %X) nounwind readnone {
 ;
 ; CHECK-AVX1-LABEL: test_urem_both:
 ; CHECK-AVX1:       # %bb.0:
-; CHECK-AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = mem[0,0]
+; CHECK-AVX1-NEXT:    vmovddup {{.*#+}} xmm1 = [-9.255967385052751E+61,-9.255967385052751E+61]
+; CHECK-AVX1-NEXT:    # xmm1 = mem[0,0]
 ; CHECK-AVX1-NEXT:    vpmuludq %xmm1, %xmm0, %xmm1
 ; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
 ; CHECK-AVX1-NEXT:    vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
--
2.7.4
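The hunks above only add the three MOVDDUP opcodes to two switch statements; the code that actually renders the bracketed constant as an asm comment sits further down in EmitInstruction and is unchanged by this patch. To make the effect concrete, below is a minimal standalone C++ sketch, not LLVM's implementation: the helper name broadcastComment is hypothetical, and the exact formatting is an approximation (LLVM renders the value through APFloat, so the exponent's case and digit count can differ). It bit-casts the 64-bit constant-pool entry to a double and repeats it NumElts times, which is the shape of the new `xmm2 = [7.9499288951273625E-275,7.9499288951273625E-275]` CHECK lines.

// Standalone sketch of the comment text the asm printer now emits for a
// MOVDDUP constant-pool broadcast. Illustration only, not LLVM's code.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <sstream>
#include <string>

// Hypothetical helper: render "<reg> = [C,C,...,C]" for a 64-bit pool entry.
std::string broadcastComment(const std::string &DstReg, uint64_t Bits,
                             int NumElts) {
  double D;
  std::memcpy(&D, &Bits, sizeof(D)); // reinterpret the raw bits as a double
  std::ostringstream CS;
  CS.precision(17); // enough significant digits to round-trip a double
  CS << DstReg << " = [";
  for (int I = 0; I != NumElts; ++I) {
    if (I != 0)
      CS << ",";
    CS << D;
  }
  CS << "]";
  return CS.str();
}

int main() {
  // 0x0706050403020100 is the byte pattern 0..7 splatted as one i64 -- the
  // constant behind the 7.9499288951273625E-275 comments in the tests above.
  std::cout << broadcastComment("xmm1", 0x0706050403020100ULL, 2) << '\n';
}

All three new opcodes map to NumElts = 2 because MOVDDUP duplicates a single 64-bit element across a 128-bit register; the VBROADCASTSS cases use 4 or 8 on the same principle, one entry per 32-bit lane.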