define i8* @translate_element_size1(i64 %arg) {
; CHECK-LABEL: name: translate_element_size1
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK-NEXT: [[C:%[0-9]+]]:_(p0) = G_CONSTANT i64 0
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[C]], [[COPY]](s64)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
; CHECK-NEXT: $x0 = COPY [[COPY1]](p0)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%tmp = getelementptr i8, i8* null, i64 %arg
ret i8* %tmp
}
; NOTE(review): the 'define' line for @first_offset_const is not in this chunk;
; the IR body below uses %addr and %type declared elsewhere — verify against the original test.
; CHECK-LABEL: name: first_offset_const
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: $x0 = COPY [[PTR_ADD]](p0)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%res = getelementptr %type, %type* %addr, i32 1
ret %type* %res
}
; NOTE(review): the 'define' line for @first_offset_trivial is not in this chunk — verify against the original test.
; CHECK-LABEL: name: first_offset_trivial
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
; CHECK-NEXT: $x0 = COPY [[COPY1]](p0)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%res = getelementptr %type, %type* %addr, i32 0
ret %type* %res
}
; NOTE(review): the 'define' line for @first_offset_variable is not in this chunk — verify against the original test.
; CHECK-LABEL: name: first_offset_variable
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $x0, $x1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[C]]
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
; CHECK-NEXT: $x0 = COPY [[COPY2]](p0)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%res = getelementptr %type, %type* %addr, i64 %idx
ret %type* %res
}
; NOTE(review): the 'define' line for @first_offset_ext is not in this chunk — verify against the original test.
; CHECK-LABEL: name: first_offset_ext
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $w1, $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
; CHECK-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY1]](s32)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
; CHECK-NEXT: $x0 = COPY [[COPY2]](p0)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%res = getelementptr %type, %type* %addr, i32 %idx
ret %type* %res
}
; NOTE(review): the 'define' line for @const_then_var is not in this chunk — verify against the original test.
; CHECK-LABEL: name: const_then_var
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $x0, $x1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 272
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[C1]]
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[MUL]](s64)
; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(p0) = COPY [[PTR_ADD1]](p0)
; CHECK-NEXT: $x0 = COPY [[COPY2]](p0)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%res = getelementptr %type1, %type1* %addr, i32 4, i32 1, i64 %idx
ret i32* %res
}
; NOTE(review): the 'define' line for @var_then_const is not in this chunk — verify against the original test.
; CHECK-LABEL: name: var_then_const
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $x0, $x1
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; CHECK-NEXT: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY1]], [[C]]
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[MUL]](s64)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 40
; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p0) = G_PTR_ADD [[PTR_ADD]], [[C1]](s64)
; CHECK-NEXT: $x0 = COPY [[PTR_ADD1]](p0)
; CHECK-NEXT: RET_ReallyLR implicit $x0
%res = getelementptr %type1, %type1* %addr, i64 %idx, i32 2, i32 2
ret i32* %res
}
define <2 x i32*> @vec_gep_scalar_base(<2 x i64> %offs) {
; CHECK-LABEL: name: vec_gep_scalar_base
; CHECK: bb.1.entry:
; CHECK-NEXT: liveins: $q0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $q0
; CHECK-NEXT: [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @arr
; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[GV]](p0), [[GV]](p0)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
; CHECK-NEXT: [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[COPY]], [[BUILD_VECTOR1]]
; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(<2 x p0>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<2 x p0>) = COPY [[PTR_ADD]](<2 x p0>)
; CHECK-NEXT: $q0 = COPY [[COPY1]](<2 x p0>)
; CHECK-NEXT: RET_ReallyLR implicit $q0
entry:
%0 = getelementptr inbounds [8 x i32], [8 x i32]* @arr, i64 0, <2 x i64> %offs
ret <2 x i32*> %0
}
; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=arm64-apple-ios %s -o - -global-isel -global-isel-abort=1 -stop-after=irtranslator | FileCheck %s
define i128 @func_i128(i128* %ptr) {
  ; CHECK-LABEL: name: func_i128
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $x0
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[COPY]](p0) :: (load (s128) from %ir.ptr)
  ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[LOAD]](s128)
  ; CHECK-NEXT: $x0 = COPY [[UV]](s64)
  ; CHECK-NEXT: $x1 = COPY [[UV1]](s64)
  ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $x1
  %val = load i128, i128* %ptr
  ret i128 %val
}
define <8 x float> @func_v8f32(<8 x float>* %ptr) {
  ; CHECK-LABEL: name: func_v8f32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $x0
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<8 x s32>) from %ir.ptr)
  ; CHECK-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[LOAD]](<8 x s32>)
  ; CHECK-NEXT: $q0 = COPY [[UV]](<4 x s32>)
  ; CHECK-NEXT: $q1 = COPY [[UV1]](<4 x s32>)
  ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
  %val = load <8 x float>, <8 x float>* %ptr
  ret <8 x float> %val
}
; A bit weird, but s0-s5 is what SDAG does too.
define <6 x float> @func_v6f32(<6 x float>* %ptr) {
  ; CHECK-LABEL: name: func_v6f32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $x0
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
  ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<6 x s32>) = G_LOAD [[COPY]](p0) :: (load (<6 x s32>) from %ir.ptr, align 32)
  ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<6 x s32>)
  ; CHECK-NEXT: $s0 = COPY [[UV]](s32)
  ; CHECK-NEXT: $s1 = COPY [[UV1]](s32)
  ; CHECK-NEXT: $s2 = COPY [[UV2]](s32)
  ; CHECK-NEXT: $s3 = COPY [[UV3]](s32)
  ; CHECK-NEXT: $s4 = COPY [[UV4]](s32)
  ; CHECK-NEXT: $s5 = COPY [[UV5]](s32)
  ; CHECK-NEXT: RET_ReallyLR implicit $s0, implicit $s1, implicit $s2, implicit $s3, implicit $s4, implicit $s5
  %val = load <6 x float>, <6 x float>* %ptr
  ret <6 x float> %val
}
define i128 @ABIi128(i128 %arg1) {
  ; CHECK-LABEL: name: ABIi128
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT: liveins: $x0, $x1
  ; CHECK-NEXT: {{ $}}
  ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
  ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
  ; CHECK-NEXT: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[COPY]](s64), [[COPY1]](s64)
  ; CHECK-NEXT: [[FPTOUI:%[0-9]+]]:_(s128) = G_FPTOUI [[MV]](s128)
  ; CHECK-NEXT: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[FPTOUI]](s128)
  ; CHECK-NEXT: $x0 = COPY [[UV]](s64)
  ; CHECK-NEXT: $x1 = COPY [[UV1]](s64)
  ; CHECK-NEXT: RET_ReallyLR implicit $x0, implicit $x1
  %farg1 = bitcast i128 %arg1 to fp128
  %res = fptoui fp128 %farg1 to i128
  ret i128 %res
}
define <2 x half> @f16_vec_param(<2 x half> %v) {
; CHECK-LABEL: name: f16_vec_param
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $d0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $d0
; CHECK-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
; CHECK-NEXT: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[UV]](<2 x s16>), [[DEF]](<2 x s16>)
; CHECK-NEXT: $d0 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
ret <2 x half> %v
}
define <2 x i16> @i16_vec_param(<2 x i16> %v) {
; CHECK-LABEL: name: i16_vec_param
; CHECK: bb.1 (%ir-block.0):
; CHECK-NEXT: liveins: $d0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[COPY]](<2 x s32>)
; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(<2 x s32>) = G_ANYEXT [[TRUNC]](<2 x s16>)
; CHECK-NEXT: $d0 = COPY [[ANYEXT]](<2 x s32>)
; CHECK-NEXT: RET_ReallyLR implicit $d0
ret <2 x i16> %v
}
;
; NOTE(review): MIR fragment — the 'name:'/'body:' headers and the trailing
; load/return instructions are not part of this chunk; verify against the original test.
; CHECK-LABEL: name: use_xro_cannot_encode_add_lsl
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %copy:gpr64sp = COPY $x0
; CHECK-NEXT: %cst:gpr64 = MOVi64imm 4580179968
; CHECK-NEXT: %load:gpr64 = LDRXroX %copy, %cst, 0, 0 :: (volatile load (s64))
; CHECK-NEXT: RET_ReallyLR
%copy:gpr(p0) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 4580179968
%addr:gpr(p0) = G_PTR_ADD %copy, %cst(s64)
;
; NOTE(review): MIR fragment — surrounding headers and trailing instructions are not in this chunk.
; CHECK-LABEL: name: use_xro_preferred_mov
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %copy:gpr64sp = COPY $x0
; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 61440
; CHECK-NEXT: %cst:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
; CHECK-NEXT: %load:gpr64 = LDRXroX %copy, %cst, 0, 0 :: (volatile load (s64))
; CHECK-NEXT: RET_ReallyLR
%copy:gpr(p0) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 61440
%addr:gpr(p0) = G_PTR_ADD %copy, %cst(s64)
;
; NOTE(review): MIR fragment — surrounding headers and trailing instructions are not in this chunk.
; CHECK-LABEL: name: use_xro_negative_imm
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %copy:gpr64sp = COPY $x0
; CHECK-NEXT: %cst:gpr64 = MOVi64imm -61440
; CHECK-NEXT: %load:gpr64 = LDRXroX %copy, %cst, 0, 0 :: (volatile load (s64))
; CHECK-NEXT: RET_ReallyLR
%copy:gpr(p0) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 -61440
%addr:gpr(p0) = G_PTR_ADD %copy, %cst(s64)
;
; NOTE(review): MIR fragment — surrounding headers and trailing instructions are not in this chunk.
; CHECK-LABEL: name: dont_use_xro_selectable_imm
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %copy:gpr64sp = COPY $x0
; CHECK-NEXT: %load:gpr64 = LDRXui %copy, 2 :: (volatile load (s64))
; CHECK-NEXT: RET_ReallyLR
%copy:gpr(p0) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 16
%addr:gpr(p0) = G_PTR_ADD %copy, %cst(s64)
;
; NOTE(review): MIR fragment — surrounding headers and trailing instructions are not in this chunk.
; CHECK-LABEL: name: dont_use_xro_selectable_negative_imm
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %copy:gpr64sp = COPY $x0
; CHECK-NEXT: %load:gpr64 = LDURXi %copy, -16 :: (volatile load (s64))
; CHECK-NEXT: RET_ReallyLR
%copy:gpr(p0) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 -16
%addr:gpr(p0) = G_PTR_ADD %copy, %cst(s64)
;
; NOTE(review): MIR fragment — surrounding headers and trailing instructions are not in this chunk.
; CHECK-LABEL: name: dont_use_xro_zero
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %copy:gpr64sp = COPY $x0
; CHECK-NEXT: %load:gpr64 = LDRXui %copy, 0 :: (volatile load (s64))
; CHECK-NEXT: RET_ReallyLR
%copy:gpr(p0) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 0
%addr:gpr(p0) = G_PTR_ADD %copy, %cst(s64)
;
; NOTE(review): MIR fragment — surrounding headers and trailing instructions are not in this chunk.
; CHECK-LABEL: name: dont_use_xro_in_range
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %copy:gpr64sp = COPY $x0
; CHECK-NEXT: %load:gpr64 = LDURXi %copy, 17 :: (volatile load (s64))
; CHECK-NEXT: RET_ReallyLR
%copy:gpr(p0) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 17
%addr:gpr(p0) = G_PTR_ADD %copy, %cst(s64)
;
; NOTE(review): MIR fragment — surrounding headers and trailing instructions are not in this chunk.
; CHECK-LABEL: name: dont_use_xro_add_lsl
; CHECK: liveins: $x0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %copy:gpr64 = COPY $x0
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64common = COPY %copy
; CHECK-NEXT: %addr:gpr64sp = ADDXri [[COPY]], 273, 12
; CHECK-NEXT: %load:gpr64 = LDRXui %addr, 0 :: (volatile load (s64))
; CHECK-NEXT: RET_ReallyLR
%copy:gpr(p0) = COPY $x0
%cst:gpr(s64) = G_CONSTANT i64 1118208
%addr:gpr(p0) = G_PTR_ADD %copy, %cst(s64)