; CHECK-NEXT: .byte 4 // 0x4
; CHECK-NEXT: .byte 8 // 0x8
; CHECK-NEXT: .byte 12 // 0xc
+; CHECK-NEXT: .byte 255 // 0xff
+; CHECK-NEXT: .byte 255 // 0xff
+; CHECK-NEXT: .byte 255 // 0xff
+; CHECK-NEXT: .byte 255 // 0xff
+
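+; <8 x i8> variant: the shuffle takes bytes 0-3 of the first tbl2 result and
+; bytes 4-7 of the second, so the checked lowering keeps two 8b tbl2
+; instructions and merges the results with a single lane move.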
+define <8 x i8> @shuffled_tbl2_to_tbl4_v8i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
+; CHECK-LABEL: shuffled_tbl2_to_tbl4_v8i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: adrp x8, .LCPI8_0
+; CHECK-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-NEXT: // kill: def $q3 killed $q3 killed $q2_q3 def $q2_q3
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-NEXT: // kill: def $q2 killed $q2 killed $q2_q3 def $q2_q3
+; CHECK-NEXT: ldr d4, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT: tbl.8b v0, { v0, v1 }, v4
+; CHECK-NEXT: tbl.8b v1, { v2, v3 }, v4
+; CHECK-NEXT: mov.s v0[1], v1[1]
+; CHECK-NEXT: // kill: def $d0 killed $d0 killed $q0
+; CHECK-NEXT: ret
+ %t1 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %a, <16 x i8> %b, <8 x i8> <i8 0, i8 4, i8 8, i8 12, i8 -1, i8 -1, i8 -1, i8 -1>)
+ %t2 = call <8 x i8> @llvm.aarch64.neon.tbl2.v8i8(<16 x i8> %c, <16 x i8> %d, <8 x i8> <i8 0, i8 4, i8 8, i8 12, i8 -1, i8 -1, i8 -1, i8 -1>)
+ %s = shufflevector <8 x i8> %t1, <8 x i8> %t2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i8> %s
+}
+
+; CHECK-LABEL: .LCPI9_0:
+; CHECK-NEXT: .byte 0 // 0x0
+; CHECK-NEXT: .byte 4 // 0x4
+; CHECK-NEXT: .byte 8 // 0x8
+; CHECK-NEXT: .byte 12 // 0xc
; CHECK-NEXT: .byte 16 // 0x10
; CHECK-NEXT: .byte 20 // 0x14
; CHECK-NEXT: .byte 24 // 0x18
define <16 x i8> @shuffled_tbl2_to_tbl4(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; CHECK-LABEL: shuffled_tbl2_to_tbl4:
; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, .LCPI8_0
+; CHECK-NEXT: adrp x8, .LCPI9_0
; CHECK-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
; CHECK-NEXT: // kill: def $q3 killed $q3 killed $q2_q3 def $q2_q3
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
; CHECK-NEXT: // kill: def $q2 killed $q2 killed $q2_q3 def $q2_q3
-; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI8_0]
+; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI9_0]
; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v4
; CHECK-NEXT: tbl.16b v1, { v2, v3 }, v4
; CHECK-NEXT: mov.d v0[1], v1[0]
ret <16 x i8> %s
}
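+; The mask of the first tbl2 is built from a run-time value (only its upper
+; half is constant), so a single tbl4 is not expected here; the checked output
+; materializes the mask with lane inserts and still uses two tbl2 instructions
+; plus a lane move.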
+define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_first_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) {
+; CHECK-LABEL: shuffled_tbl2_to_tbl4_nonconst_first_mask:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi.2d v4, #0xffffffffffffffff
+; CHECK-NEXT: adrp x8, .LCPI10_0
+; CHECK-NEXT: // kill: def $q3 killed $q3 killed $q2_q3 def $q2_q3
+; CHECK-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-NEXT: // kill: def $q2 killed $q2 killed $q2_q3 def $q2_q3
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-NEXT: ldr q5, [x8, :lo12:.LCPI10_0]
+; CHECK-NEXT: mov.b v4[0], w0
+; CHECK-NEXT: tbl.16b v2, { v2, v3 }, v5
+; CHECK-NEXT: mov.b v4[1], w0
+; CHECK-NEXT: mov.b v4[2], w0
+; CHECK-NEXT: mov.b v4[3], w0
+; CHECK-NEXT: mov.b v4[4], w0
+; CHECK-NEXT: mov.b v4[5], w0
+; CHECK-NEXT: mov.b v4[6], w0
+; CHECK-NEXT: mov.b v4[7], w0
+; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v4
+; CHECK-NEXT: mov.d v0[1], v2[0]
+; CHECK-NEXT: ret
+ %ins.0 = insertelement <16 x i8> poison, i8 %v, i32 0
+ %ins.1 = insertelement <16 x i8> %ins.0, i8 %v, i32 1
+ %ins.2 = insertelement <16 x i8> %ins.1, i8 %v, i32 2
+ %ins.3 = insertelement <16 x i8> %ins.2, i8 %v, i32 3
+ %ins.4 = insertelement <16 x i8> %ins.3, i8 %v, i32 4
+ %ins.5 = insertelement <16 x i8> %ins.4, i8 %v, i32 5
+ %ins.6 = insertelement <16 x i8> %ins.5, i8 %v, i32 6
+ %ins.7 = insertelement <16 x i8> %ins.6, i8 %v, i32 7
+ %ins.8 = insertelement <16 x i8> %ins.7, i8 -1, i32 8
+ %ins.9 = insertelement <16 x i8> %ins.8, i8 -1, i32 9
+ %ins.10 = insertelement <16 x i8> %ins.9, i8 -1, i32 10
+ %ins.11 = insertelement <16 x i8> %ins.10, i8 -1, i32 11
+ %ins.12 = insertelement <16 x i8> %ins.11, i8 -1, i32 12
+ %ins.13 = insertelement <16 x i8> %ins.12, i8 -1, i32 13
+ %ins.14 = insertelement <16 x i8> %ins.13, i8 -1, i32 14
+ %ins.15 = insertelement <16 x i8> %ins.14, i8 -1, i32 15
+ %t1 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %ins.15)
+ %t2 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %c, <16 x i8> %d, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ %s = shufflevector <16 x i8> %t1, <16 x i8> %t2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i8> %s
+}
+
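+; Same as above, but the non-constant mask feeds the second tbl2 (the second
+; shuffle operand); again two tbl2 instructions and a lane move are expected
+; rather than a single tbl4.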
+define <16 x i8> @shuffled_tbl2_to_tbl4_nonconst_second_mask(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d, i8 %v) {
+; CHECK-LABEL: shuffled_tbl2_to_tbl4_nonconst_second_mask:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi.2d v4, #0xffffffffffffffff
+; CHECK-NEXT: adrp x8, .LCPI11_0
+; CHECK-NEXT: // kill: def $q3 killed $q3 killed $q2_q3 def $q2_q3
+; CHECK-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
+; CHECK-NEXT: // kill: def $q2 killed $q2 killed $q2_q3 def $q2_q3
+; CHECK-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
+; CHECK-NEXT: ldr q5, [x8, :lo12:.LCPI11_0]
+; CHECK-NEXT: mov.b v4[0], w0
+; CHECK-NEXT: tbl.16b v2, { v2, v3 }, v5
+; CHECK-NEXT: mov.b v4[1], w0
+; CHECK-NEXT: mov.b v4[2], w0
+; CHECK-NEXT: mov.b v4[3], w0
+; CHECK-NEXT: mov.b v4[4], w0
+; CHECK-NEXT: mov.b v4[5], w0
+; CHECK-NEXT: mov.b v4[6], w0
+; CHECK-NEXT: mov.b v4[7], w0
+; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v4
+; CHECK-NEXT: mov.d v2[1], v0[0]
+; CHECK-NEXT: mov.16b v0, v2
+; CHECK-NEXT: ret
+ %ins.0 = insertelement <16 x i8> poison, i8 %v, i32 0
+ %ins.1 = insertelement <16 x i8> %ins.0, i8 %v, i32 1
+ %ins.2 = insertelement <16 x i8> %ins.1, i8 %v, i32 2
+ %ins.3 = insertelement <16 x i8> %ins.2, i8 %v, i32 3
+ %ins.4 = insertelement <16 x i8> %ins.3, i8 %v, i32 4
+ %ins.5 = insertelement <16 x i8> %ins.4, i8 %v, i32 5
+ %ins.6 = insertelement <16 x i8> %ins.5, i8 %v, i32 6
+ %ins.7 = insertelement <16 x i8> %ins.6, i8 %v, i32 7
+ %ins.8 = insertelement <16 x i8> %ins.7, i8 -1, i32 8
+ %ins.9 = insertelement <16 x i8> %ins.8, i8 -1, i32 9
+ %ins.10 = insertelement <16 x i8> %ins.9, i8 -1, i32 10
+ %ins.11 = insertelement <16 x i8> %ins.10, i8 -1, i32 11
+ %ins.12 = insertelement <16 x i8> %ins.11, i8 -1, i32 12
+ %ins.13 = insertelement <16 x i8> %ins.12, i8 -1, i32 13
+ %ins.14 = insertelement <16 x i8> %ins.13, i8 -1, i32 14
+ %ins.15 = insertelement <16 x i8> %ins.14, i8 -1, i32 15
+ %t1 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %c, <16 x i8> %d, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
+ %t2 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %ins.15)
+ %s = shufflevector <16 x i8> %t1, <16 x i8> %t2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ ret <16 x i8> %s
+}
+
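+; The shuffle mask here is not the low-half concatenation handled by the
+; combine, so no tbl4 is formed; the final shuffle is lowered as a third tbl.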
define <16 x i8> @shuffled_tbl2_to_tbl4_incompatible_shuffle(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; CHECK-LABEL: shuffled_tbl2_to_tbl4_incompatible_shuffle:
; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, .LCPI9_0
+; CHECK-NEXT: adrp x8, .LCPI12_0
; CHECK-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
; CHECK-NEXT: // kill: def $q3 killed $q3 killed $q2_q3 def $q2_q3
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
; CHECK-NEXT: // kill: def $q2 killed $q2 killed $q2_q3 def $q2_q3
-; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI9_0]
-; CHECK-NEXT: adrp x8, .LCPI9_1
+; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI12_0]
+; CHECK-NEXT: adrp x8, .LCPI12_1
; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v4
; CHECK-NEXT: tbl.16b v1, { v2, v3 }, v4
-; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI9_1]
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI12_1]
; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v2
; CHECK-NEXT: ret
%t1 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
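+; The first tbl2 mask sets index 8 to 0 rather than -1, so the two tbl2 masks
+; are not compatible with a single tbl4 lowering; the results are merged with
+; an extra tbl instead.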
define <16 x i8> @shuffled_tbl2_to_tbl4_incompatible_tbl2_mask1(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; CHECK-LABEL: shuffled_tbl2_to_tbl4_incompatible_tbl2_mask1:
; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, .LCPI10_0
-; CHECK-NEXT: adrp x9, .LCPI10_1
+; CHECK-NEXT: adrp x8, .LCPI13_0
+; CHECK-NEXT: adrp x9, .LCPI13_1
; CHECK-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
; CHECK-NEXT: // kill: def $q3 killed $q3 killed $q2_q3 def $q2_q3
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
; CHECK-NEXT: // kill: def $q2 killed $q2 killed $q2_q3 def $q2_q3
-; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI10_0]
-; CHECK-NEXT: adrp x8, .LCPI10_2
-; CHECK-NEXT: ldr q5, [x9, :lo12:.LCPI10_1]
+; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI13_0]
+; CHECK-NEXT: adrp x8, .LCPI13_2
+; CHECK-NEXT: ldr q5, [x9, :lo12:.LCPI13_1]
; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v4
; CHECK-NEXT: tbl.16b v1, { v2, v3 }, v5
-; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI10_2]
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI13_2]
; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v2
; CHECK-NEXT: ret
%t1 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 0, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
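+; The second tbl2 mask (loaded from a separate constant pool in the checked
+; output) is likewise incompatible, so the combine does not apply and an extra
+; tbl merges the two tbl2 results.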
define <16 x i8> @shuffled_tbl2_to_tbl4_incompatible_tbl2_mask2(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; CHECK-LABEL: shuffled_tbl2_to_tbl4_incompatible_tbl2_mask2:
; CHECK: // %bb.0:
-; CHECK-NEXT: adrp x8, .LCPI11_0
-; CHECK-NEXT: adrp x9, .LCPI11_1
+; CHECK-NEXT: adrp x8, .LCPI14_0
+; CHECK-NEXT: adrp x9, .LCPI14_1
; CHECK-NEXT: // kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1
; CHECK-NEXT: // kill: def $q3 killed $q3 killed $q2_q3 def $q2_q3
; CHECK-NEXT: // kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1
; CHECK-NEXT: // kill: def $q2 killed $q2 killed $q2_q3 def $q2_q3
-; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI11_0]
-; CHECK-NEXT: adrp x8, .LCPI11_2
-; CHECK-NEXT: ldr q5, [x9, :lo12:.LCPI11_1]
+; CHECK-NEXT: ldr q4, [x8, :lo12:.LCPI14_0]
+; CHECK-NEXT: adrp x8, .LCPI14_2
+; CHECK-NEXT: ldr q5, [x9, :lo12:.LCPI14_1]
; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v4
; CHECK-NEXT: tbl.16b v1, { v2, v3 }, v5
-; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI11_2]
+; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI14_2]
; CHECK-NEXT: tbl.16b v0, { v0, v1 }, v2
; CHECK-NEXT: ret
%t1 = call <16 x i8> @llvm.aarch64.neon.tbl2.v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> <i8 0, i8 4, i8 8, i8 12, i8 16, i8 20, i8 24, i8 28, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)