; address spaces
; Loads a byte at offset 6 from the generic (addrspace 0) null pointer cast into
; the constant address space (4); the CHECK lines expect it to be widened to an
; i32 load at offset 4 followed by lshr/trunc extraction of the byte.
; NOTE(review): resolved leftover diff markers to the opaque-pointer (`ptr`) form.
define amdgpu_kernel void @constant_from_offset_cast_generic_null() {
; CHECK-LABEL: @constant_from_offset_cast_generic_null(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 4), align 4
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT: store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; CHECK-NEXT: ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
; Same pattern as the generic-null test, but the source pointer is the global
; (addrspace 1) null pointer cast to constant (addrspace 4).
; NOTE(review): resolved leftover diff markers to the opaque-pointer (`ptr`) form.
define amdgpu_kernel void @constant_from_offset_cast_global_null() {
; CHECK-LABEL: @constant_from_offset_cast_global_null(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 4), align 4
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT: store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; CHECK-NEXT: ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
; Offset load from a global variable @gv (declared elsewhere in the file) cast
; from addrspace(1) into the constant address space. With opaque pointers the
; array-GEP decoration on @gv collapses, so the CHECK expects a direct
; addrspacecast of @gv.
; NOTE(review): resolved leftover diff markers to the opaque-pointer (`ptr`) form.
define amdgpu_kernel void @constant_from_offset_cast_global_gv() {
; CHECK-LABEL: @constant_from_offset_cast_global_gv(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 4), align 4
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT: store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; CHECK-NEXT: ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
; Offset load where the base is a generic inttoptr constant (address 128) cast
; into the constant address space; same widen-to-i32 + lshr/trunc expectation.
; NOTE(review): resolved leftover diff markers to the opaque-pointer (`ptr`) form.
define amdgpu_kernel void @constant_from_offset_cast_generic_inttoptr() {
; CHECK-LABEL: @constant_from_offset_cast_generic_inttoptr(
; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 4), align 4
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; CHECK-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; CHECK-NEXT: store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; CHECK-NEXT: ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}
; Direct inttoptr into addrspace(4) with no offset: the CHECK lines expect the
; i8 load to be kept (alignment raised to 4) rather than widened/shifted.
; NOTE(review): resolved leftover diff markers to the opaque-pointer (`ptr`) form.
define amdgpu_kernel void @constant_from_inttoptr() {
; CHECK-LABEL: @constant_from_inttoptr(
; CHECK-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 4
; CHECK-NEXT: store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; CHECK-NEXT: ret void
;
  %load = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}