BUILTIN(__builtin_amdgcn_workgroup_size_y, "Us", "nc")
BUILTIN(__builtin_amdgcn_workgroup_size_z, "Us", "nc")
+BUILTIN(__builtin_amdgcn_grid_size_x, "Ui", "nc")
+BUILTIN(__builtin_amdgcn_grid_size_y, "Ui", "nc")
+BUILTIN(__builtin_amdgcn_grid_size_z, "Ui", "nc")
+
BUILTIN(__builtin_amdgcn_mbcnt_hi, "UiUiUi", "nc")
BUILTIN(__builtin_amdgcn_mbcnt_lo, "UiUiUi", "nc")
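In Clang's builtin .def encoding, "Ui" is unsigned int, "Us" is unsigned short, and "nc" marks a builtin as nothrow and const, so the new entries effectively declare the prototypes below (a rough sketch for illustration; the grid size is the total number of work-items launched in each dimension):

    unsigned int __builtin_amdgcn_grid_size_x(void);
    unsigned int __builtin_amdgcn_grid_size_y(void);
    unsigned int __builtin_amdgcn_grid_size_z(void);
    // For comparison, the existing workgroup-size builtins return
    // unsigned short, since a workgroup dimension fits in 16 bits.
    unsigned short __builtin_amdgcn_workgroup_size_x(void);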
llvm::MDNode::get(CGF.getLLVMContext(), None));
return LD;
}
+
+// \p Index is 0, 1, and 2 for x, y, and z dimension, respectively.
+Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
+  // Byte offset of grid_size_x in the HSA kernel dispatch packet.
+  const unsigned XOffset = 12;
+ auto *DP = EmitAMDGPUDispatchPtr(CGF);
+ // Indexing the HSA kernel_dispatch_packet struct.
+ auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
+ auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
+ auto *DstTy =
+ CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
+ auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
+ auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
+ LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(CGF.getLLVMContext(), None));
+ return LD;
+}
} // namespace
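XOffset = 12 and the Index * 4 stride follow from the layout of the HSA kernel dispatch packet that EmitAMDGPUDispatchPtr (i.e. @llvm.amdgcn.dispatch.ptr) returns a pointer to. A sketch of the leading fields, with names and offsets as in hsa.h's hsa_kernel_dispatch_packet_t (remaining fields omitted):

    struct dispatch_packet_prefix {   // illustrative prefix only
      uint16_t header;                // byte 0
      uint16_t setup;                 // byte 2
      uint16_t workgroup_size_x;      // byte 4:  read by EmitAMDGPUWorkGroupSize
      uint16_t workgroup_size_y;      // byte 6
      uint16_t workgroup_size_z;      // byte 8
      uint16_t reserved0;             // byte 10
      uint32_t grid_size_x;           // byte 12: XOffset + 0 * 4
      uint32_t grid_size_y;           // byte 16: XOffset + 1 * 4
      uint32_t grid_size_z;           // byte 20: XOffset + 2 * 4
    };

The 4-byte load alignment and the !invariant.load metadata are sound because the grid-size fields are naturally aligned 32-bit values and the packet does not change for the lifetime of the dispatch.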
// For processing memory ordering and memory scope arguments of various
case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
return EmitAMDGPUWorkGroupSize(*this, 2);
+ // amdgcn grid size
+ case AMDGPU::BI__builtin_amdgcn_grid_size_x:
+ return EmitAMDGPUGridSize(*this, 0);
+ case AMDGPU::BI__builtin_amdgcn_grid_size_y:
+ return EmitAMDGPUGridSize(*this, 1);
+ case AMDGPU::BI__builtin_amdgcn_grid_size_z:
+ return EmitAMDGPUGridSize(*this, 2);
+
// r600 intrinsics
case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
}
}
+// CHECK-LABEL: @test_get_grid_size(
+// CHECK: call align 4 dereferenceable(64) i8 addrspace(4)* @llvm.amdgcn.dispatch.ptr()
+// CHECK: getelementptr i8, i8 addrspace(4)* %{{.*}}, i64 12
+// CHECK: load i32, i32 addrspace(4)* %{{.*}}, align 4, !invariant.load
+// CHECK: getelementptr i8, i8 addrspace(4)* %{{.*}}, i64 16
+// CHECK: load i32, i32 addrspace(4)* %{{.*}}, align 4, !invariant.load
+// CHECK: getelementptr i8, i8 addrspace(4)* %{{.*}}, i64 20
+// CHECK: load i32, i32 addrspace(4)* %{{.*}}, align 4, !invariant.load
+void test_get_grid_size(int d, global int *out)
+{
+ switch (d) {
+ case 0: *out = __builtin_amdgcn_grid_size_x(); break;
+ case 1: *out = __builtin_amdgcn_grid_size_y(); break;
+ case 2: *out = __builtin_amdgcn_grid_size_z(); break;
+ default: *out = 0;
+ }
+}
+
// CHECK-LABEL: @test_fmed3_f32
// CHECK: call float @llvm.amdgcn.fmed3.f32(
void test_fmed3_f32(global float* out, float a, float b, float c)
}
namespace {
-DEVICE uint32_t grid_size_x() {
- size_t grid_size_x_offset = 96; // In bits, from AQL kernel dispatch format
- return *(uint32_t *)((char *)__builtin_amdgcn_dispatch_ptr() +
- grid_size_x_offset / 8);
-}
-
DEVICE uint32_t get_grid_dim(uint32_t n, uint16_t d) {
uint32_t q = n / d;
return q + (n > q * d);
}
} // namespace
DEVICE int GetNumberOfBlocksInKernel() {
- return get_grid_dim(grid_size_x(), __builtin_amdgcn_workgroup_size_x());
+  return get_grid_dim(__builtin_amdgcn_grid_size_x(),
+                      __builtin_amdgcn_workgroup_size_x());
}
DEVICE int GetNumberOfThreadsInBlock() {
- return get_workgroup_dim(__builtin_amdgcn_workgroup_id_x(), grid_size_x(),
+  return get_workgroup_dim(__builtin_amdgcn_workgroup_id_x(),
+                           __builtin_amdgcn_grid_size_x(),
__builtin_amdgcn_workgroup_size_x());
}
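get_grid_dim above is a rounding-up division that avoids the usual n + d - 1 overflow: q = n / d rounds down, and one extra block is added whenever q * d < n, i.e. when the grid size is not an exact multiple of the workgroup size. A minimal host-side sketch of the arithmetic (ceil_div and the launch sizes here are made-up illustrations that mirror get_grid_dim):

    #include <assert.h>
    #include <stdint.h>

    // Same rounding-up division as get_grid_dim: ceil(n / d) without
    // the overflow hazard of computing (n + d - 1) / d.
    static uint32_t ceil_div(uint32_t n, uint16_t d) {
      uint32_t q = n / d;
      return q + (n > q * d);
    }

    int main(void) {
      assert(ceil_div(1000, 256) == 4);              // 3 full workgroups + 1 partial
      assert(ceil_div(1024, 256) == 4);              // exact multiple, no extra block
      assert(ceil_div(UINT32_MAX, 1) == UINT32_MAX); // no wrap at the top end
      return 0;
    }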