; CHECK-NEXT: store i32 3, ptr [[IDX]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5:[0-9]+]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6:[0-9]+]]
; CHECK-NEXT: unreachable
;
%1 = tail call ptr @malloc(i64 32)
; CHECK-NEXT: store i32 3, ptr [[IDX]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%1 = tail call ptr @calloc(i64 4, i64 %x)
; CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[IDX]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%1 = tail call ptr @realloc(ptr null, i64 %x) nounwind
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[IDX]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%idx = getelementptr inbounds [8 x i8], ptr @.str, i64 0, i64 %x
; CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr addrspace(1) [[IDX]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%idx = getelementptr inbounds [8 x i8], ptr addrspace(1) @.str_as1, i64 0, i64 %x
; CHECK-NEXT: [[TMP8:%.*]] = load i128, ptr [[TMP2]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%1 = alloca i128, i64 %x
; CHECK-NEXT: [[TMP12:%.*]] = load i128, ptr [[TMP6]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%1 = alloca i128, i64 %x
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[TMP1]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%1 = getelementptr inbounds i8, ptr %x, i64 16
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr addrspace(1) [[TMP1]], align 4
; CHECK-NEXT: ret void
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%1 = getelementptr inbounds i8, ptr addrspace(1) %x, i16 16
; CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP4]], align 8
; CHECK-NEXT: ret i64 [[TMP12]]
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
%1 = tail call ptr @calloc(i64 1, i64 %x)
; CHECK-NEXT: [[RET:%.*]] = load i8, ptr [[P]], align 1
; CHECK-NEXT: ret i8 [[RET]]
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
entry:
; CHECK-NEXT: [[RET:%.*]] = load i8, ptr [[ALLOC]], align 1
; CHECK-NEXT: ret i8 [[RET]]
; CHECK: trap:
-; CHECK-NEXT: call void @llvm.trap() #[[ATTR5]]
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
; CHECK-NEXT: unreachable
;
entry:
%ret = load i8, ptr %alloc
ret i8 %ret
}
+
+; Fixed-width vector load through an instrumented bounds check (output looks
+; like the BoundsChecking pass -- confirm against the RUN line, which is not
+; visible in this chunk). The buffer is 256 bytes (calloc(1, 256)); the byte
+; offset is y*8; the guard traps when offset > 256 or when fewer than 16
+; bytes (sizeof(<4 x i32>)) remain past the offset.
+define <4 x i32> @load_vector(i64 %y) nounwind {
+; CHECK-LABEL: @load_vector(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call ptr @calloc(i64 1, i64 256)
+; CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[Y:%.*]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 0, [[DOTIDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 [[Y]]
+; CHECK-NEXT: [[TMP4:%.*]] = sub i64 256, [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i64 256, [[TMP2]]
+; CHECK-NEXT: [[TMP6:%.*]] = icmp ult i64 [[TMP4]], 16
+; CHECK-NEXT: [[TMP7:%.*]] = or i1 [[TMP5]], [[TMP6]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[TRAP:%.*]], label [[TMP8:%.*]]
+; CHECK: 8:
+; CHECK-NEXT: [[TMP9:%.*]] = load <4 x i32>, ptr [[TMP3]], align 8
+; CHECK-NEXT: ret <4 x i32> [[TMP9]]
+; CHECK: trap:
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
+; CHECK-NEXT: unreachable
+;
+  ; Uninstrumented input IR: load <4 x i32> at an i64-scaled offset into a
+  ; 256-byte allocation.
+ %1 = tail call ptr @calloc(i64 1, i64 256)
+ %2 = getelementptr inbounds i64, ptr %1, i64 %y
+ %3 = load <4 x i32>, ptr %2, align 8
+ ret <4 x i32> %3
+}
+
+; Scalable-vector variant: the vector's byte size is not a compile-time
+; constant, so the instrumentation materializes it at runtime as
+; llvm.vscale() * 4 (vscale x 1 x i32 => vscale * 4 bytes) and compares the
+; remaining bytes (256 - y*8) against that value before allowing the load;
+; otherwise it branches to the trap block (llvm.trap + unreachable).
+define <vscale x 1 x i32> @load_scalable_vector(i64 %y) nounwind {
+; CHECK-LABEL: @load_scalable_vector(
+; CHECK-NEXT: [[TMP1:%.*]] = tail call ptr @calloc(i64 1, i64 256)
+; CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[Y:%.*]], 8
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 0, [[DOTIDX]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i64 [[Y]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; CHECK-NEXT: [[TMP6:%.*]] = sub i64 256, [[TMP2]]
+; CHECK-NEXT: [[TMP7:%.*]] = icmp ult i64 256, [[TMP2]]
+; CHECK-NEXT: [[TMP8:%.*]] = icmp ult i64 [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP7]], [[TMP8]]
+; CHECK-NEXT: br i1 [[TMP9]], label [[TRAP:%.*]], label [[TMP10:%.*]]
+; CHECK: 10:
+; CHECK-NEXT: [[TMP11:%.*]] = load <vscale x 1 x i32>, ptr [[TMP3]], align 8
+; CHECK-NEXT: ret <vscale x 1 x i32> [[TMP11]]
+; CHECK: trap:
+; CHECK-NEXT: call void @llvm.trap() #[[ATTR6]]
+; CHECK-NEXT: unreachable
+;
+  ; Uninstrumented input IR: scalable-vector load at an i64-scaled offset
+  ; into a 256-byte allocation.
+ %1 = tail call ptr @calloc(i64 1, i64 256)
+ %2 = getelementptr inbounds i64, ptr %1, i64 %y
+ %3 = load <vscale x 1 x i32>, ptr %2, align 8
+ ret <vscale x 1 x i32> %3
+}