dEQP-VK.subgroups.builtin_var.compute.subgroupinvocationid_compute_requiredsubgroupsize
dEQP-VK.subgroups.builtin_var.compute.numsubgroups_requiredsubgroupsize
dEQP-VK.subgroups.builtin_var.compute.subgroupid_requiredsubgroupsize
+dEQP-VK.subgroups.builtin_var.ray_tracing.subgroupsize
+dEQP-VK.subgroups.builtin_var.ray_tracing.subgroupinvocationid
dEQP-VK.subgroups.builtin_mask_var.compute.subgroupeqmask_requiredsubgroupsize
dEQP-VK.subgroups.builtin_mask_var.compute.subgroupgemask_requiredsubgroupsize
dEQP-VK.subgroups.builtin_mask_var.compute.subgroupgtmask_requiredsubgroupsize
dEQP-VK.subgroups.builtin_mask_var.compute.subgrouplemask_requiredsubgroupsize
dEQP-VK.subgroups.builtin_mask_var.compute.subgroupltmask_requiredsubgroupsize
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupeqmask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupgemask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupgtmask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgrouplemask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupltmask
dEQP-VK.subgroups.basic.compute.subgroupelect_requiredsubgroupsize
dEQP-VK.subgroups.basic.compute.subgroupbarrier_requiredsubgroupsize
dEQP-VK.subgroups.basic.compute.subgroupmemorybarrier_requiredsubgroupsize
dEQP-VK.subgroups.basic.compute.subgroupmemorybarrierbuffer_requiredsubgroupsize
dEQP-VK.subgroups.basic.compute.subgroupmemorybarriershared_requiredsubgroupsize
dEQP-VK.subgroups.basic.compute.subgroupmemorybarrierimage_requiredsubgroupsize
+dEQP-VK.subgroups.basic.ray_tracing.subgroupelect
+dEQP-VK.subgroups.basic.ray_tracing.subgroupbarrier
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrier
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrierbuffer
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrierimage
dEQP-VK.subgroups.vote.compute.subgroupallequal_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.vote.compute.subgroupallequal_i8vec2_requiredsubgroupsize
dEQP-VK.subgroups.vote.compute.subgroupallequal_i8vec3_requiredsubgroupsize
dEQP-VK.subgroups.vote.compute.subgroupallequal_bvec2_requiredsubgroupsize
dEQP-VK.subgroups.vote.compute.subgroupallequal_bvec3_requiredsubgroupsize
dEQP-VK.subgroups.vote.compute.subgroupallequal_bvec4_requiredsubgroupsize
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i8vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint8_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u8vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i16vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint16_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u16vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_ivec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupall_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupany_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uvec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i64vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint64_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u64vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_f16vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_float
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_double
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_dvec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_dvec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bool
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec2
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec4
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.compute.allinvocationsarb_uint_requiredsubgroupsize
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.compute.anyinvocationarb_uint_requiredsubgroupsize
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.compute.allinvocationsequalarb_bool_requiredsubgroupsize
dEQP-VK.subgroups.ballot.compute.compute_requiredsubgroupsize
+dEQP-VK.subgroups.ballot.ray_tracing.test
dEQP-VK.subgroups.ballot.ext_shader_subgroup_ballot.compute.compute_requiredsubgroupsize
dEQP-VK.subgroups.ballot_broadcast.compute.subgroupbroadcast_int8_t_requiredsubgroupsize1
dEQP-VK.subgroups.ballot_broadcast.compute.subgroupbroadcast_int8_t_requiredsubgroupsize2
dEQP-VK.subgroups.ballot_broadcast.compute.subgroupbroadcastfirst_bvec4_requiredsubgroupsize32
dEQP-VK.subgroups.ballot_broadcast.compute.subgroupbroadcastfirst_bvec4_requiredsubgroupsize64
dEQP-VK.subgroups.ballot_broadcast.compute.subgroupbroadcastfirst_bvec4_requiredsubgroupsize128
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec4
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.compute.subgroupbroadcast_int_requiredsubgroupsize1
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.compute.subgroupbroadcast_int_requiredsubgroupsize2
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.compute.subgroupbroadcast_int_requiredsubgroupsize4
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.compute.subgroupbroadcastfirst_float_requiredsubgroupsize32
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.compute.subgroupbroadcastfirst_float_requiredsubgroupsize64
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.compute.subgroupbroadcastfirst_float_requiredsubgroupsize128
-dEQP-VK.subgroups.ballot_other.compute.subgroupinverseballot_requiredsubgroupSize
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitextract_requiredsubgroupSize
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitcount_requiredsubgroupSize
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotinclusivebitcount_requiredsubgroupSize
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotexclusivebitcount_requiredsubgroupSize
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindlsb_requiredsubgroupSize
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindmsb_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupinverseballot_requiredsubgroupsize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitextract_requiredsubgroupsize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitcount_requiredsubgroupsize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotinclusivebitcount_requiredsubgroupsize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotexclusivebitcount_requiredsubgroupsize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindlsb_requiredsubgroupsize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindmsb_requiredsubgroupsize
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupinverseballot
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotbitextract
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotbitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotinclusivebitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotexclusivebitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotfindlsb
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotfindmsb
dEQP-VK.subgroups.arithmetic.compute.subgroupadd_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.arithmetic.compute.subgroupmul_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.arithmetic.compute.subgroupmin_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.arithmetic.compute.subgroupexclusiveand_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.arithmetic.compute.subgroupexclusiveor_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.arithmetic.compute.subgroupexclusivexor_bvec4_requiredsubgroupsize
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec4
dEQP-VK.subgroups.clustered.compute.subgroupclusteredadd_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.clustered.compute.subgroupclusteredmul_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.clustered.compute.subgroupclusteredmin_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.clustered.compute.subgroupclusteredand_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.clustered.compute.subgroupclusteredor_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.clustered.compute.subgroupclusteredxor_bvec4_requiredsubgroupsize
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec4
dEQP-VK.subgroups.partitioned.compute.subgroupadd_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.partitioned.compute.subgroupmul_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.partitioned.compute.subgroupmin_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.partitioned.compute.subgroupexclusiveand_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.partitioned.compute.subgroupexclusiveor_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.partitioned.compute.subgroupexclusivexor_bvec4_requiredsubgroupsize
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec4
dEQP-VK.subgroups.shuffle.compute.subgroupshuffle_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.shuffle.compute.subgroupshufflexor_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.shuffle.compute.subgroupshuffleup_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.shuffle.compute.subgroupshufflexor_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.shuffle.compute.subgroupshuffleup_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.shuffle.compute.subgroupshuffledown_bvec4_requiredsubgroupsize
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec4
dEQP-VK.subgroups.quad.compute.subgroupquadbroadcast_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.quad.compute.subgroupquadbroadcast_nonconst_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.quad.compute.subgroupquadswaphorizontal_int8_t_requiredsubgroupsize
dEQP-VK.subgroups.quad.compute.subgroupquadswaphorizontal_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.quad.compute.subgroupquadswapvertical_bvec4_requiredsubgroupsize
dEQP-VK.subgroups.quad.compute.subgroupquadswapdiagonal_bvec4_requiredsubgroupsize
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec4
dEQP-VK.subgroups.shape.compute.clustered_requiredsubgroupsize
dEQP-VK.subgroups.shape.compute.quad_requiredsubgroupsize
+dEQP-VK.subgroups.shape.ray_tracing.clustered
+dEQP-VK.subgroups.shape.ray_tracing.quad
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.compute.gl_subgroupeqmaskarb_requiredsubgroupsize
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.compute.gl_subgroupgemaskarb_requiredsubgroupsize
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.compute.gl_subgroupgtmaskarb_requiredsubgroupsize
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.compute.gl_subgrouplemaskarb_requiredsubgroupsize
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.compute.gl_subgroupltmaskarb_requiredsubgroupsize
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupeqmaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupgemaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupgtmaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgrouplemaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupltmaskarb
dEQP-VK.subgroups.size_control.generic.subgroup_size_properties
dEQP-VK.subgroups.size_control.graphics.allow_varying_subgroup_size
dEQP-VK.subgroups.size_control.graphics.required_subgroup_size_max
dEQP-VK.subgroups.size_control.framebuffer.geometry_required_subgroup_size_min
dEQP-VK.subgroups.size_control.framebuffer.fragment_required_subgroup_size_max
dEQP-VK.subgroups.size_control.framebuffer.fragment_required_subgroup_size_min
+dEQP-VK.subgroups.size_control.ray_tracing.allow_varying_subgroup_size
+dEQP-VK.subgroups.size_control.ray_tracing.required_subgroup_size_max
+dEQP-VK.subgroups.size_control.ray_tracing.required_subgroup_size_min
dEQP-VK.ycbcr.filtering.linear_sampler_g8_b8_r8_3plane_420_unorm
dEQP-VK.ycbcr.filtering.linear_sampler_with_chroma_linear_filtering_g8_b8_r8_3plane_420_unorm
dEQP-VK.ycbcr.filtering.linear_sampler_g8_b8r8_2plane_420_unorm
dEQP-VK.subgroups.builtin_var.compute.numsubgroups_requiredsubgroupsize
dEQP-VK.subgroups.builtin_var.compute.subgroupid
dEQP-VK.subgroups.builtin_var.compute.subgroupid_requiredsubgroupsize
+dEQP-VK.subgroups.builtin_var.ray_tracing.subgroupsize
+dEQP-VK.subgroups.builtin_var.ray_tracing.subgroupinvocationid
dEQP-VK.subgroups.builtin_var.framebuffer.subgroupsize_vertex
dEQP-VK.subgroups.builtin_var.framebuffer.subgroupsize_tess_eval
dEQP-VK.subgroups.builtin_var.framebuffer.subgroupsize_tess_control
dEQP-VK.subgroups.builtin_mask_var.framebuffer.subgroupltmask_tess_eval
dEQP-VK.subgroups.builtin_mask_var.framebuffer.subgroupltmask_tess_control
dEQP-VK.subgroups.builtin_mask_var.framebuffer.subgroupltmask_geometry
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupeqmask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupgemask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupgtmask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgrouplemask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupltmask
dEQP-VK.subgroups.basic.graphics.subgroupelect
dEQP-VK.subgroups.basic.graphics.subgroupbarrier
dEQP-VK.subgroups.basic.graphics.subgroupmemorybarrier
dEQP-VK.subgroups.basic.framebuffer.subgroupmemorybarrierimage_tess_eval
dEQP-VK.subgroups.basic.framebuffer.subgroupmemorybarrierimage_tess_control
dEQP-VK.subgroups.basic.framebuffer.subgroupmemorybarrierimage_geometry
+dEQP-VK.subgroups.basic.ray_tracing.subgroupelect
+dEQP-VK.subgroups.basic.ray_tracing.subgroupbarrier
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrier
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrierbuffer
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrierimage
dEQP-VK.subgroups.vote.graphics.subgroupallequal_int8_t
dEQP-VK.subgroups.vote.graphics.subgroupallequal_i8vec2
dEQP-VK.subgroups.vote.graphics.subgroupallequal_i8vec3
dEQP-VK.subgroups.vote.frag_helper.subgroupallequal_bvec2_fragment
dEQP-VK.subgroups.vote.frag_helper.subgroupallequal_bvec3_fragment
dEQP-VK.subgroups.vote.frag_helper.subgroupallequal_bvec4_fragment
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i8vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint8_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u8vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i16vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint16_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u16vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_ivec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupall_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupany_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uvec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i64vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint64_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u64vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_f16vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_float
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_double
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_dvec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_dvec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bool
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec2
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec4
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.graphics.allinvocationsarb_uint
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.graphics.anyinvocationarb_uint
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.graphics.allinvocationsequalarb_bool
dEQP-VK.subgroups.ballot.framebuffer.tess_control
dEQP-VK.subgroups.ballot.framebuffer.geometry
dEQP-VK.subgroups.ballot.framebuffer.vertex
+dEQP-VK.subgroups.ballot.ray_tracing.test
dEQP-VK.subgroups.ballot.ext_shader_subgroup_ballot.graphics.graphic
dEQP-VK.subgroups.ballot.ext_shader_subgroup_ballot.compute.compute
dEQP-VK.subgroups.ballot.ext_shader_subgroup_ballot.compute.compute_requiredsubgroupsize
dEQP-VK.subgroups.ballot_broadcast.framebuffer.subgroupbroadcastfirst_bvec4tess_eval
dEQP-VK.subgroups.ballot_broadcast.framebuffer.subgroupbroadcastfirst_bvec4tess_control
dEQP-VK.subgroups.ballot_broadcast.framebuffer.subgroupbroadcastfirst_bvec4geometry
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec4
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.graphics.subgroupbroadcast_int
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.graphics.subgroupbroadcast_nonconst_int
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.graphics.subgroupbroadcastfirst_int
dEQP-VK.subgroups.ballot_other.graphics.subgroupballotfindlsb
dEQP-VK.subgroups.ballot_other.graphics.subgroupballotfindmsb
dEQP-VK.subgroups.ballot_other.compute.subgroupinverseballot
-dEQP-VK.subgroups.ballot_other.compute.subgroupinverseballot_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupinverseballot_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitextract
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitextract_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitextract_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitcount
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitcount_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitcount_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotinclusivebitcount
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotinclusivebitcount_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotinclusivebitcount_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotexclusivebitcount
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotexclusivebitcount_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotexclusivebitcount_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindlsb
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindlsb_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindlsb_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindmsb
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindmsb_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindmsb_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupinverseballot_vertex
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupinverseballot_tess_eval
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupinverseballot_tess_control
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupballotfindmsb_tess_eval
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupballotfindmsb_tess_control
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupballotfindmsb_geometry
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupinverseballot
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotbitextract
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotbitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotinclusivebitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotexclusivebitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotfindlsb
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotfindmsb
dEQP-VK.subgroups.arithmetic.graphics.subgroupadd_int8_t
dEQP-VK.subgroups.arithmetic.graphics.subgroupmul_int8_t
dEQP-VK.subgroups.arithmetic.graphics.subgroupmin_int8_t
dEQP-VK.subgroups.arithmetic.framebuffer.subgroupexclusivexor_bvec4_tess_eval
dEQP-VK.subgroups.arithmetic.framebuffer.subgroupexclusivexor_bvec4_tess_control
dEQP-VK.subgroups.arithmetic.framebuffer.subgroupexclusivexor_bvec4_geometry
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec4
dEQP-VK.subgroups.clustered.graphics.subgroupclusteredadd_int8_t
dEQP-VK.subgroups.clustered.graphics.subgroupclusteredmul_int8_t
dEQP-VK.subgroups.clustered.graphics.subgroupclusteredmin_int8_t
dEQP-VK.subgroups.clustered.framebuffer.subgroupclusteredxor_bvec4_tess_eval
dEQP-VK.subgroups.clustered.framebuffer.subgroupclusteredxor_bvec4_tess_control
dEQP-VK.subgroups.clustered.framebuffer.subgroupclusteredxor_bvec4_geometry
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec4
dEQP-VK.subgroups.partitioned.graphics.subgroupadd_int8_t
dEQP-VK.subgroups.partitioned.graphics.subgroupmul_int8_t
dEQP-VK.subgroups.partitioned.graphics.subgroupmin_int8_t
dEQP-VK.subgroups.partitioned.framebuffer.subgroupexclusivexor_bvec4_tess_eval
dEQP-VK.subgroups.partitioned.framebuffer.subgroupexclusivexor_bvec4_tess_control
dEQP-VK.subgroups.partitioned.framebuffer.subgroupexclusivexor_bvec4_geometry
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec4
dEQP-VK.subgroups.shuffle.graphics.subgroupshuffle_int8_t
dEQP-VK.subgroups.shuffle.graphics.subgroupshufflexor_int8_t
dEQP-VK.subgroups.shuffle.graphics.subgroupshuffleup_int8_t
dEQP-VK.subgroups.shuffle.framebuffer.subgroupshuffledown_bvec4_tess_eval
dEQP-VK.subgroups.shuffle.framebuffer.subgroupshuffledown_bvec4_tess_control
dEQP-VK.subgroups.shuffle.framebuffer.subgroupshuffledown_bvec4_geometry
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec4
dEQP-VK.subgroups.quad.graphics.subgroupquadbroadcast_int8_t
dEQP-VK.subgroups.quad.graphics.subgroupquadbroadcast_nonconst_int8_t
dEQP-VK.subgroups.quad.graphics.subgroupquadswaphorizontal_int8_t
dEQP-VK.subgroups.quad.framebuffer.subgroupquadswapdiagonal_bvec4_tess_eval
dEQP-VK.subgroups.quad.framebuffer.subgroupquadswapdiagonal_bvec4_tess_control
dEQP-VK.subgroups.quad.framebuffer.subgroupquadswapdiagonal_bvec4_geometry
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec4
dEQP-VK.subgroups.shape.graphics.clustered
dEQP-VK.subgroups.shape.graphics.quad
dEQP-VK.subgroups.shape.compute.clustered
dEQP-VK.subgroups.shape.framebuffer.quad_tess_eval
dEQP-VK.subgroups.shape.framebuffer.quad_tess_control
dEQP-VK.subgroups.shape.framebuffer.quad_geometry
+dEQP-VK.subgroups.shape.ray_tracing.clustered
+dEQP-VK.subgroups.shape.ray_tracing.quad
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.graphics.gl_subgroupeqmaskarb
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.graphics.gl_subgroupgemaskarb
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.graphics.gl_subgroupgtmaskarb
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.framebuffer.gl_subgroupltmaskarb_tess_eval
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.framebuffer.gl_subgroupltmaskarb_tess_control
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.framebuffer.gl_subgroupltmaskarb_geometry
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupeqmaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupgemaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupgtmaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgrouplemaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupltmaskarb
dEQP-VK.subgroups.size_control.generic.subgroup_size_properties
dEQP-VK.subgroups.size_control.graphics.allow_varying_subgroup_size
dEQP-VK.subgroups.size_control.graphics.required_subgroup_size_max
dEQP-VK.subgroups.size_control.framebuffer.geometry_required_subgroup_size_min
dEQP-VK.subgroups.size_control.framebuffer.fragment_required_subgroup_size_max
dEQP-VK.subgroups.size_control.framebuffer.fragment_required_subgroup_size_min
+dEQP-VK.subgroups.size_control.ray_tracing.allow_varying_subgroup_size
+dEQP-VK.subgroups.size_control.ray_tracing.required_subgroup_size_max
+dEQP-VK.subgroups.size_control.ray_tracing.required_subgroup_size_min
dEQP-VK.ycbcr.format.g8b8g8r8_422_unorm.vertex_optimal
dEQP-VK.ycbcr.format.g8b8g8r8_422_unorm.vertex_optimal_array
dEQP-VK.ycbcr.format.g8b8g8r8_422_unorm.vertex_linear
else \
TCU_THROW(InternalError, "Attempt to reassign shader")
-void RayTracingPipeline::addShader (VkShaderStageFlagBits shaderStage, Move<VkShaderModule> shaderModule, deUint32 group, const VkSpecializationInfo* specializationInfo)
+void RayTracingPipeline::addShader (VkShaderStageFlagBits shaderStage,
+ Move<VkShaderModule> shaderModule,
+ deUint32 group,
+ const VkSpecializationInfo* specializationInfo,
+ const VkPipelineShaderStageCreateFlags pipelineShaderStageCreateFlags,
+ const void* pipelineShaderStageCreateInfopNext)
{
- addShader(shaderStage, makeVkSharedPtr(shaderModule), group, specializationInfo);
+ addShader(shaderStage, makeVkSharedPtr(shaderModule), group, specializationInfo, pipelineShaderStageCreateFlags, pipelineShaderStageCreateInfopNext);
}
-void RayTracingPipeline::addShader (VkShaderStageFlagBits shaderStage, de::SharedPtr<Move<VkShaderModule>> shaderModule, deUint32 group, const VkSpecializationInfo* specializationInfoPtr)
+void RayTracingPipeline::addShader (VkShaderStageFlagBits shaderStage,
+ de::SharedPtr<Move<VkShaderModule>> shaderModule,
+ deUint32 group,
+ const VkSpecializationInfo* specializationInfoPtr,
+ const VkPipelineShaderStageCreateFlags pipelineShaderStageCreateFlags,
+ const void* pipelineShaderStageCreateInfopNext)
{
if (group >= m_shadersGroupCreateInfos.size())
{
const VkPipelineShaderStageCreateInfo shaderCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
+ pipelineShaderStageCreateInfopNext, // const void* pNext;
+ pipelineShaderStageCreateFlags, // VkPipelineShaderStageCreateFlags flags;
shaderStage, // VkShaderStageFlagBits stage;
**shaderModule, // VkShaderModule module;
"main", // const char* pName;
namespace vk
{
+constexpr VkShaderStageFlags SHADER_STAGE_ALL_RAY_TRACING = VK_SHADER_STAGE_RAYGEN_BIT_KHR
+ | VK_SHADER_STAGE_ANY_HIT_BIT_KHR
+ | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR
+ | VK_SHADER_STAGE_MISS_BIT_KHR
+ | VK_SHADER_STAGE_INTERSECTION_BIT_KHR
+ | VK_SHADER_STAGE_CALLABLE_BIT_KHR;
+
const VkTransformMatrixKHR identityMatrix3x4 = { { { 1.0f, 0.0f, 0.0f, 0.0f }, { 0.0f, 1.0f, 0.0f, 0.0f }, { 0.0f, 0.0f, 1.0f, 0.0f } } };
template<typename T>
void addShader (VkShaderStageFlagBits shaderStage,
Move<VkShaderModule> shaderModule,
deUint32 group,
- const VkSpecializationInfo* specializationInfo = nullptr);
+ const VkSpecializationInfo* specializationInfo = nullptr,
+ const VkPipelineShaderStageCreateFlags pipelineShaderStageCreateFlags = static_cast<VkPipelineShaderStageCreateFlags>(0),
+ const void* pipelineShaderStageCreateInfopNext = nullptr);
void addShader (VkShaderStageFlagBits shaderStage,
de::SharedPtr<Move<VkShaderModule>> shaderModule,
deUint32 group,
- const VkSpecializationInfo* specializationInfoPtr = nullptr);
+ const VkSpecializationInfo* specializationInfoPtr = nullptr,
+ const VkPipelineShaderStageCreateFlags pipelineShaderStageCreateFlags = static_cast<VkPipelineShaderStageCreateFlags>(0),
+ const void* pipelineShaderStageCreateInfopNext = nullptr);
void addLibrary (de::SharedPtr<de::MovePtr<RayTracingPipeline>> pipelineLibrary);
Move<VkPipeline> createPipeline (const DeviceInterface& vk,
const VkDevice device,
return primitiveTopologyCastToList(primitiveTopology) == VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
}
+inline bool isAllInStage (const VkShaderStageFlags shaderStageFlags, const VkShaderStageFlags stageMask)
+{
+ return (shaderStageFlags & stageMask) != 0 && ((shaderStageFlags & ~stageMask) == 0);
+}
+
+inline bool isAllComputeStages (const VkShaderStageFlags shaderStageFlags)
+{
+ return isAllInStage(shaderStageFlags, VK_SHADER_STAGE_COMPUTE_BIT);
+}
+
+inline bool isAllGraphicsStages (const VkShaderStageFlags shaderStageFlags)
+{
+ return isAllInStage(shaderStageFlags, VK_SHADER_STAGE_ALL_GRAPHICS);
+}
+
+inline bool isAllRayTracingStages (const VkShaderStageFlags shaderStageFlags)
+{
+ const VkShaderStageFlags rayTracingStageFlags = VK_SHADER_STAGE_RAYGEN_BIT_KHR
+ | VK_SHADER_STAGE_ANY_HIT_BIT_KHR
+ | VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR
+ | VK_SHADER_STAGE_MISS_BIT_KHR
+ | VK_SHADER_STAGE_INTERSECTION_BIT_KHR
+ | VK_SHADER_STAGE_CALLABLE_BIT_KHR;
+
+ return isAllInStage(shaderStageFlags, rayTracingStageFlags);
+}
+
} // vk
#endif // _VKTYPEUTIL_HPP
OPTYPE_LAST
};
-static Operator getOperator(OpType t)
+struct CaseDefinition
+{
+ Operator op;
+ ScanType scanType;
+ VkShaderStageFlags shaderStage;
+ VkFormat format;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool requiredSubgroupSize;
+};
+
+static Operator getOperator (OpType opType)
{
- switch (t)
+ switch (opType)
{
case OPTYPE_ADD:
case OPTYPE_INCLUSIVE_ADD:
}
}
-static ScanType getScanType(OpType t)
+static ScanType getScanType(OpType opType)
{
- switch (t)
+ switch (opType)
{
case OPTYPE_ADD:
case OPTYPE_MUL:
}
}
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 0x3);
+
+ return subgroups::check(datas, width, 0x3);
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 0x3);
-}
-std::string getOpTypeName(Operator op, ScanType scanType)
-{
- return getScanOpName("subgroup", "", op, scanType);
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 0x3);
}
-struct CaseDefinition
+string getOpTypeName (Operator op, ScanType scanType)
{
- Operator op;
- ScanType scanType;
- VkShaderStageFlags shaderStage;
- VkFormat format;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
-};
+ return getScanOpName("subgroup", "", op, scanType);
+}
-std::string getExtHeader(CaseDefinition caseDef)
+string getExtHeader (const CaseDefinition& caseDef)
{
return "#extension GL_KHR_shader_subgroup_arithmetic: enable\n"
"#extension GL_KHR_shader_subgroup_ballot: enable\n" +
subgroups::getAdditionalExtensionForFormat(caseDef.format);
}
-std::string getIndexVars(CaseDefinition caseDef)
+string getIndexVars (const CaseDefinition& caseDef)
{
switch (caseDef.scanType)
{
- case SCAN_REDUCE:
- return " uint start = 0, end = gl_SubgroupSize;\n";
- case SCAN_INCLUSIVE:
- return " uint start = 0, end = gl_SubgroupInvocationID + 1;\n";
- case SCAN_EXCLUSIVE:
- return " uint start = 0, end = gl_SubgroupInvocationID;\n";
+ case SCAN_REDUCE: return " uint start = 0, end = gl_SubgroupSize;\n";
+ case SCAN_INCLUSIVE: return " uint start = 0, end = gl_SubgroupInvocationID + 1;\n";
+ case SCAN_EXCLUSIVE: return " uint start = 0, end = gl_SubgroupInvocationID;\n";
+ default: TCU_THROW(InternalError, "Unreachable");
}
- DE_FATAL("Unreachable");
- return "";
}
-std::string getTestSrc(CaseDefinition caseDef)
+string getTestSrc (const CaseDefinition& caseDef)
{
- std::string indexVars = getIndexVars(caseDef);
+ const string indexVars = getIndexVars(caseDef);
return " uvec4 mask = subgroupBallot(true);\n"
+ indexVars +
" }\n";
}
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
-
- std::string extHeader = getExtHeader(caseDef);
- std::string testSrc = getTestSrc(caseDef);
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSrc(caseDef);
subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
-
- std::string extHeader = getExtHeader(caseDef);
- std::string testSrc = getTestSrc(caseDef);
+ const bool spirv14required = isAllRayTracingStages(caseDef.shaderStage);
+ const SpirvVersion spirvVersion = spirv14required ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSrc(caseDef);
subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
}
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const subgroups::SSBOData inputData =
{
- if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd140;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+ const subgroups::SSBOData inputData =
{
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
- inputData.binding = 4u;
- inputData.stages = stages;
-
- return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData,
- 1, DE_NULL, checkVertexPipelineStages, stages);
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 4u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
+
+ return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 6u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
}
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsArithmeticTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsArithmeticTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup arithmetic category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup arithmetic category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup arithmetic category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "arithmetic", "Subgroup arithmetic category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup arithmetic category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup arithmetic category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup arithmetic category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup arithmetic category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
- const std::vector<VkFormat> formats = subgroups::getAllFormats();
-
- for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- const VkFormat format = formats[formatIndex];
+ const vector<VkFormat> formats = subgroups::getAllFormats();
- for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- bool isBool = subgroups::isFormatBool(format);
- bool isFloat = subgroups::isFormatFloat(format);
-
- OpType opType = static_cast<OpType>(opTypeIndex);
- Operator op = getOperator(opType);
- ScanType st = getScanType(opType);
-
- bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
-
- // Skip float with bitwise category.
- if (isFloat && isBitwiseOp)
- continue;
-
- // Skip bool when its not the bitwise category.
- if (isBool && !isBitwiseOp)
- continue;
-
- const std::string name = de::toLower(getOpTypeName(op, st)) + "_" + subgroups::getFormatNameForGLSL(format);
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ const bool isBool = subgroups::isFormatBool(format);
+ const bool isFloat = subgroups::isFormatFloat(format);
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- CaseDefinition caseDef = {op, st, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), name,
- "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), name + "_requiredsubgroupsize",
- "", supportedCheck, initPrograms, test, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const Operator op = getOperator(opType);
+ const ScanType st = getScanType(opType);
+ const bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
+
+ // Skip float with bitwise category.
+ if (isFloat && isBitwiseOp)
+ continue;
+
+					// Skip bool when it is not the bitwise category.
+ if (isBool && !isBitwiseOp)
+ continue;
+
+ const string name = de::toLower(getOpTypeName(op, st)) + "_" + formatName;
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
+ {
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ st, // ScanType scanType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ {
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ st, // ScanType scanType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ {
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ st, // ScanType scanType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ }
}
+ }
+ }
- {
- const CaseDefinition caseDef = {op, st, VK_SHADER_STAGE_ALL_GRAPHICS, format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(graphicGroup.get(), name,
- "", supportedCheck, initPrograms, test, caseDef);
- }
+ {
+ const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();
+
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
+ {
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ const bool isBool = subgroups::isFormatBool(format);
+ const bool isFloat = subgroups::isFormatFloat(format);
- for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- const CaseDefinition caseDef = {op, st, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(), name +
- "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const Operator op = getOperator(opType);
+ const ScanType st = getScanType(opType);
+ const bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
+
+ // Skip float with bitwise category.
+ if (isFloat && isBitwiseOp)
+ continue;
+
+				// Skip bool when it is not the bitwise category.
+ if (isBool && !isBitwiseOp)
+ continue;
+
+ {
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ st, // ScanType scanType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string name = de::toLower(getOpTypeName(op, st)) + "_" + formatName;
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
}
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "arithmetic", "Subgroup arithmetic category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsArithmeticTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsArithmeticTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
OPTYPE_LAST
};
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+struct CaseDefinition
+{
+ OpType opType;
+ VkShaderStageFlags shaderStage;
+ VkFormat format;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool extShaderSubGroupBallotTests;
+ deBool subgroupSizeControl;
+ deUint32 requiredSubgroupSize;
+};
+
+bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 3);
+
+ return subgroups::check(datas, width, 3);
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 3);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 3);
}
-std::string getOpTypeCaseName(int opType)
+string getOpTypeCaseName (OpType opType)
{
switch (opType)
{
- default:
- DE_FATAL("Unsupported op type");
- return "";
- case OPTYPE_BROADCAST:
- return "subgroupbroadcast";
- case OPTYPE_BROADCAST_NONCONST:
- return "subgroupbroadcast_nonconst";
- case OPTYPE_BROADCAST_FIRST:
- return "subgroupbroadcastfirst";
+ case OPTYPE_BROADCAST: return "subgroupbroadcast";
+ case OPTYPE_BROADCAST_NONCONST: return "subgroupbroadcast_nonconst";
+ case OPTYPE_BROADCAST_FIRST: return "subgroupbroadcastfirst";
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-
-struct CaseDefinition
-{
- int opType;
- VkShaderStageFlags shaderStage;
- VkFormat format;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool extShaderSubGroupBallotTests;
- deBool subgroupSizeControl;
- int requiredSubgroupSize;
-};
-
-std::string getExtHeader(CaseDefinition caseDef)
+string getExtHeader (const CaseDefinition& caseDef)
{
return (caseDef.extShaderSubGroupBallotTests ? "#extension GL_ARB_shader_ballot: enable\n"
"#extension GL_KHR_shader_subgroup_basic: enable\n"
+ subgroups::getAdditionalExtensionForFormat(caseDef.format);
}
-std::string getTestSrc(const CaseDefinition &caseDef)
+string getTestSrc (const CaseDefinition &caseDef)
{
- std::ostringstream bdy;
+ ostringstream bdy;
+ string broadcast;
+ string broadcastFirst;
+ string mask;
+ int max;
+ const string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
- std::string broadcast;
- std::string broadcastFirst;
- std::string mask;
- int max;
if (caseDef.extShaderSubGroupBallotTests)
{
broadcast = "readInvocationARB";
broadcastFirst = "readFirstInvocationARB";
mask = "mask = ballotARB(true);\n";
- max = 64;
+ max = 64;
bdy << " uint64_t mask;\n"
<< mask
<< " uint sgInvocation = gl_SubgroupInvocationID;\n";
}
- const std::string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
-
if (caseDef.opType == OPTYPE_BROADCAST)
{
bdy << " tempRes = 0x3;\n"
}
else if (caseDef.opType == OPTYPE_BROADCAST_NONCONST)
{
- const std::string validate = " if (subgroupBallotBitExtract(mask, id) && op != data[id])\n"
- " tempRes = 0;\n";
+ const string validate = " if (subgroupBallotBitExtract(mask, id) && op != data[id])\n"
+ " tempRes = 0;\n";
bdy << " tempRes= 0x3;\n"
<< " for (uint id = 0; id < sgSize; id++)\n"
<< validate
<< " }\n";
}
- else
+ else if (caseDef.opType == OPTYPE_BROADCAST_FIRST)
{
bdy << " tempRes = 0;\n"
<< " uint firstActive = 0;\n"
<< " tempRes |= 0x2;\n"
<< " }\n";
}
+ else
+ TCU_THROW(InternalError, "Unknown operation type");
+
return bdy.str();
}
-std::string getHelperFunctionARB(const CaseDefinition &caseDef)
+string getHelperFunctionARB (const CaseDefinition &caseDef)
{
- std::ostringstream bdy;
+ ostringstream bdy;
if (caseDef.extShaderSubGroupBallotTests == DE_FALSE)
return "";
bdy << " return true;\n";
bdy << " return false;\n";
bdy << "}\n";
- return bdy.str();
+
+ return bdy.str();
}
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::SpirvVersion spirvVersion = (caseDef.opType == OPTYPE_BROADCAST_NONCONST) ? vk::SPIRV_VERSION_1_5 : vk::SPIRV_VERSION_1_3;
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const SpirvVersion spirvVersion = (caseDef.opType == OPTYPE_BROADCAST_NONCONST) ? SPIRV_VERSION_1_5 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSrc(caseDef);
+ const string helperStr = getHelperFunctionARB(caseDef);
- std::string extHeader = getExtHeader(caseDef);
- std::string testSrc = getTestSrc(caseDef);
- std::string helperStr = getHelperFunctionARB(caseDef);
-
- subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, helperStr);
+ subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, helperStr);
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::SpirvVersion spirvVersion = (caseDef.opType == OPTYPE_BROADCAST_NONCONST) ? vk::SPIRV_VERSION_1_5 : vk::SPIRV_VERSION_1_3;
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
-
- std::string extHeader = getExtHeader(caseDef);
- std::string testSrc = getTestSrc(caseDef);
- std::string helperStr = getHelperFunctionARB(caseDef);
+ const bool spirv15required = caseDef.opType == OPTYPE_BROADCAST_NONCONST;
+ const bool spirv14required = isAllRayTracingStages(caseDef.shaderStage);
+ const SpirvVersion spirvVersion = spirv15required ? SPIRV_VERSION_1_5
+ : spirv14required ? SPIRV_VERSION_1_4
+ : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSrc(caseDef);
+ const string helperStr = getHelperFunctionARB(caseDef);
subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, helperStr);
}
if (!subgroups::isFormatSupportedForDevice(context, caseDef.format))
TCU_THROW(NotSupportedError, "Device does not support the specified format in subgroup operations");
- if (caseDef.extShaderSubGroupBallotTests && !context.requireDeviceFunctionality("VK_EXT_shader_subgroup_ballot"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_shader_subgroup_ballot extension");
+ if (caseDef.extShaderSubGroupBallotTests)
+ {
+ context.requireDeviceFunctionality("VK_EXT_shader_subgroup_ballot");
- if (caseDef.extShaderSubGroupBallotTests && !subgroups::isInt64SupportedForDevice(context))
- TCU_THROW(NotSupportedError, "Device does not support int64 data types");
+ if (!subgroups::isInt64SupportedForDevice(context))
+ TCU_THROW(NotSupportedError, "Device does not support int64 data types");
+ }
if ((caseDef.opType == OPTYPE_BROADCAST_NONCONST) && !subgroups::isSubgroupBroadcastDynamicIdSupported(context))
TCU_THROW(NotSupportedError, "Device does not support SubgroupBroadcastDynamicId");
if (caseDef.subgroupSizeControl)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
-
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- if (caseDef.requiredSubgroupSize < (int)subgroupSizeControlProperties.minSubgroupSize
- || caseDef.requiredSubgroupSize > (int)subgroupSizeControlProperties.maxSubgroupSize)
+ if (caseDef.requiredSubgroupSize < subgroupSizeControlProperties.minSubgroupSize
+ || caseDef.requiredSubgroupSize > subgroupSizeControlProperties.maxSubgroupSize)
{
TCU_THROW(NotSupportedError, "Unsupported subgroup size");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const VkDeviceSize numElements = caseDef.extShaderSubGroupBallotTests ? 64u : subgroups::maxSupportedSubgroupSize();
+ const subgroups::SSBOData inputData =
{
- if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ numElements, // vk::VkDeviceSize numElements;
+ };
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd140;
- inputData.numElements = caseDef.extShaderSubGroupBallotTests ? 64u : subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ const VkDeviceSize numElements = caseDef.extShaderSubGroupBallotTests ? 64u : subgroups::maxSupportedSubgroupSize();
+
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const subgroups::SSBOData inputData =
{
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = caseDef.extShaderSubGroupBallotTests ? 64u : subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ numElements, // vk::VkDeviceSize numElements;
+ };
if (caseDef.subgroupSizeControl)
- return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
- caseDef.requiredSubgroupSize, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+ return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute, caseDef.requiredSubgroupSize, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
else
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = caseDef.extShaderSubGroupBallotTests ? 64u : subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
- inputData.binding = 4u;
- inputData.stages = stages;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ numElements, // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 4u, // deUint32 binding;
+			stages,									//  vk::VkShaderStageFlags		stages;
+ };
return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ numElements, // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 6u, // deUint32 binding;
+			stages,									//  vk::VkShaderStageFlags		stages;
+ };
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBallotBroadcastTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsBallotBroadcastTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup ballot broadcast category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup ballot broadcast category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup ballot broadcast category tests: framebuffer"));
-
- de::MovePtr<tcu::TestCaseGroup> graphicGroupARB(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup ballot broadcast category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroupARB(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup ballot broadcast category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroupARB(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup ballot broadcast category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "ballot_broadcast", "Subgroup ballot broadcast category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup ballot broadcast category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup ballot broadcast category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup ballot broadcast category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup ballot broadcast category tests: ray tracing"));
+
+ de::MovePtr<TestCaseGroup> groupARB (new TestCaseGroup(testCtx, "ext_shader_subgroup_ballot", "VK_EXT_shader_subgroup_ballot category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroupARB (new TestCaseGroup(testCtx, "graphics", "Subgroup ballot broadcast category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroupARB (new TestCaseGroup(testCtx, "compute", "Subgroup ballot broadcast category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroupARB (new TestCaseGroup(testCtx, "framebuffer", "Subgroup ballot broadcast category tests: framebuffer"));
+
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
- const std::vector<VkFormat> formats = subgroups::getAllFormats();
-
- for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- const VkFormat format = formats[formatIndex];
- // Vector, boolean and double types are not supported by functions defined in VK_EXT_shader_subgroup_ballot.
- const bool formatTypeIsSupportedARB =
- format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT || format == VK_FORMAT_R32_SFLOAT;
+ const vector<VkFormat> formats = subgroups::getAllFormats();
- for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- const std::string name = getOpTypeCaseName(opTypeIndex) + "_" + subgroups::getFormatNameForGLSL(format);
+ const VkFormat format = formats[formatIndex];
+ // Vector, boolean and double types are not supported by functions defined in VK_EXT_shader_subgroup_ballot.
+ const bool formatTypeIsSupportedARB = format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT || format == VK_FORMAT_R32_SFLOAT;
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE, DE_FALSE, 0};
- addFunctionCaseWithPrograms(computeGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
- caseDef.extShaderSubGroupBallotTests = DE_TRUE;
- if (formatTypeIsSupportedARB)
- addFunctionCaseWithPrograms(computeGroupARB.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string name = getOpTypeCaseName(opType) + "_" + subgroups::getFormatNameForGLSL(format);
- for (int subgroupSize = 1; subgroupSize <= (int)subgroups::maxSupportedSubgroupSize(); subgroupSize *= 2)
+ for (size_t extNdx = 0; extNdx < DE_LENGTH_OF_ARRAY(boolValues); ++extNdx)
{
- std::string testName = name + "_requiredsubgroupsize" + de::toString(subgroupSize);
- caseDef.extShaderSubGroupBallotTests = DE_FALSE;
- caseDef.subgroupSizeControl = DE_TRUE;
- caseDef.requiredSubgroupSize = subgroupSize;
- addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
- caseDef.extShaderSubGroupBallotTests = DE_TRUE;
- if (formatTypeIsSupportedARB)
- addFunctionCaseWithPrograms(computeGroupARB.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
+ const deBool extShaderSubGroupBallotTests = boolValues[extNdx];
+
+ if (extShaderSubGroupBallotTests && !formatTypeIsSupportedARB)
+ continue;
+
+ {
+ TestCaseGroup* testGroup = extShaderSubGroupBallotTests ? computeGroupARB.get() : computeGroup.get();
+ {
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ extShaderSubGroupBallotTests, // deBool extShaderSubGroupBallotTests;
+ DE_FALSE, // deBool subgroupSizeControl;
+ 0u // deUint32 requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(testGroup, name, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ for (deUint32 subgroupSize = 1; subgroupSize <= subgroups::maxSupportedSubgroupSize(); subgroupSize *= 2)
+ {
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ extShaderSubGroupBallotTests, // deBool extShaderSubGroupBallotTests;
+ DE_TRUE, // deBool subgroupSizeControl;
+ subgroupSize, // deUint32 requiredSubgroupSize;
+ };
+ const string testName = name + "_requiredsubgroupsize" + de::toString(subgroupSize);
+
+ addFunctionCaseWithPrograms(testGroup, testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
+ }
+
+ {
+ TestCaseGroup* testGroup = extShaderSubGroupBallotTests ? graphicGroupARB.get() : graphicGroup.get();
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ extShaderSubGroupBallotTests, // deBool extShaderSubGroupBallotTests;
+ DE_FALSE, // deBool subgroupSizeControl;
+ 0u // deUint32 requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(testGroup, name, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ {
+ TestCaseGroup* testGroup = extShaderSubGroupBallotTests ? framebufferGroupARB.get() : framebufferGroup.get();
+
+ for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ {
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ extShaderSubGroupBallotTests, // deBool extShaderSubGroupBallotTests;
+ DE_FALSE, // deBool subgroupSizeControl;
+ 0u // deUint32 requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(testGroup, name + getShaderStageName(caseDef.shaderStage), "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ }
+ }
}
}
+ }
+ }
- {
- CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_ALL_GRAPHICS, format, de::SharedPtr<bool>(new bool), DE_FALSE, DE_FALSE, 0};
- addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
- caseDef.extShaderSubGroupBallotTests = DE_TRUE;
- if (formatTypeIsSupportedARB)
- addFunctionCaseWithPrograms(graphicGroupARB.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ {
+ const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();
- }
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
+ {
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
- for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- CaseDefinition caseDef = {opTypeIndex, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE, DE_FALSE, 0};
- addFunctionCaseWithPrograms(framebufferGroup.get(), name + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
- caseDef.extShaderSubGroupBallotTests = DE_TRUE;
- if (formatTypeIsSupportedARB)
- addFunctionCaseWithPrograms(framebufferGroupARB.get(), name + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string name = getOpTypeCaseName(opType) + "_" + formatName;
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE, // deBool extShaderSubGroupBallotTests;
+ DE_FALSE, // deBool subgroupSizeControl;
+ 0 // int requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
}
}
}
- de::MovePtr<tcu::TestCaseGroup> groupARB(new tcu::TestCaseGroup(
- testCtx, "ext_shader_subgroup_ballot", "VK_EXT_shader_subgroup_ballot category tests"));
-
groupARB->addChild(graphicGroupARB.release());
groupARB->addChild(computeGroupARB.release());
groupARB->addChild(framebufferGroupARB.release());
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "ballot_broadcast", "Subgroup ballot broadcast category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
group->addChild(groupARB.release());
return group.release();
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBallotBroadcastTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsBallotBroadcastTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
MASKTYPE_LAST
};
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+struct CaseDefinition
+{
+ MaskType maskType;
+ VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool requiredSubgroupSize;
+};
+
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 0xf);
+
+ return subgroups::check(datas, width, 0xf);
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 0xf);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 0xf);
}
-std::string getMaskTypeName(int maskType)
+string getMaskTypeName (const MaskType maskType)
{
switch (maskType)
{
- default:
- DE_FATAL("Unsupported mask type");
- return "";
- case MASKTYPE_EQ:
- return "gl_SubGroupEqMaskARB";
- case MASKTYPE_GE:
- return "gl_SubGroupGeMaskARB";
- case MASKTYPE_GT:
- return "gl_SubGroupGtMaskARB";
- case MASKTYPE_LE:
- return "gl_SubGroupLeMaskARB";
- case MASKTYPE_LT:
- return "gl_SubGroupLtMaskARB";
+ case MASKTYPE_EQ: return "gl_SubGroupEqMaskARB";
+ case MASKTYPE_GE: return "gl_SubGroupGeMaskARB";
+ case MASKTYPE_GT: return "gl_SubGroupGtMaskARB";
+ case MASKTYPE_LE: return "gl_SubGroupLeMaskARB";
+ case MASKTYPE_LT: return "gl_SubGroupLtMaskARB";
+ default: TCU_THROW(InternalError, "Unsupported mask type");
}
}
-
-struct CaseDefinition
+string getBodySource (const CaseDefinition& caseDef)
{
- int maskType;
- VkShaderStageFlags shaderStage;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
-};
-
-std::string getBodySource(CaseDefinition caseDef)
-{
- std::ostringstream bdy;
-
- bdy << "uint64_t value = " << getMaskTypeName(caseDef.maskType) << ";\n";
- bdy << "bool temp = true;\n";
+ string body =
+ " uint64_t value = " + getMaskTypeName(caseDef.maskType) + ";\n"
+ " bool temp = true;\n";
switch(caseDef.maskType)
{
- case MASKTYPE_EQ:
- bdy << "uint64_t mask = uint64_t(1) << gl_SubGroupInvocationARB;\n";
- bdy << "temp = (value & mask) != 0;\n";
- break;
- case MASKTYPE_GE:
- bdy << "for (uint i = 0; i < gl_SubGroupSizeARB; i++) {\n";
- bdy << " uint64_t mask = uint64_t(1) << i;\n";
- bdy << " if (i >= gl_SubGroupInvocationARB && (value & mask) == 0)\n";
- bdy << " temp = false;\n";
- bdy << " if (i < gl_SubGroupInvocationARB && (value & mask) != 0)\n";
- bdy << " temp = false;\n";
- bdy << "};\n";
- break;
- case MASKTYPE_GT:
- bdy << "for (uint i = 0; i < gl_SubGroupSizeARB; i++) {\n";
- bdy << " uint64_t mask = uint64_t(1) << i;\n";
- bdy << " if (i > gl_SubGroupInvocationARB && (value & mask) == 0)\n";
- bdy << " temp = false;\n";
- bdy << " if (i <= gl_SubGroupInvocationARB && (value & mask) != 0)\n";
- bdy << " temp = false;\n";
- bdy << "};\n";
- break;
- case MASKTYPE_LE:
- bdy << "for (uint i = 0; i < gl_SubGroupSizeARB; i++) {\n";
- bdy << " uint64_t mask = uint64_t(1) << i;\n";
- bdy << " if (i <= gl_SubGroupInvocationARB && (value & mask) == 0)\n";
- bdy << " temp = false;\n";
- bdy << " if (i > gl_SubGroupInvocationARB && (value & mask) != 0)\n";
- bdy << " temp = false;\n";
- bdy << "};\n";
- break;
- case MASKTYPE_LT:
- bdy << "for (uint i = 0; i < gl_SubGroupSizeARB; i++) {\n";
- bdy << " uint64_t mask = uint64_t(1) << i;\n";
- bdy << " if (i < gl_SubGroupInvocationARB && (value & mask) == 0)\n";
- bdy << " temp = false;\n";
- bdy << " if (i >= gl_SubGroupInvocationARB && (value & mask) != 0)\n";
- bdy << " temp = false;\n";
- bdy << "};\n";
- break;
+ case MASKTYPE_EQ:
+ body += " uint64_t mask = uint64_t(1) << gl_SubGroupInvocationARB;\n"
+ " temp = (value & mask) != 0;\n";
+ break;
+ case MASKTYPE_GE:
+ body += " for (uint i = 0; i < gl_SubGroupSizeARB; i++) {\n"
+ " uint64_t mask = uint64_t(1) << i;\n"
+ " if (i >= gl_SubGroupInvocationARB && (value & mask) == 0)\n"
+ " temp = false;\n"
+ " if (i < gl_SubGroupInvocationARB && (value & mask) != 0)\n"
+ " temp = false;\n"
+ " };\n";
+ break;
+ case MASKTYPE_GT:
+ body += " for (uint i = 0; i < gl_SubGroupSizeARB; i++) {\n"
+ " uint64_t mask = uint64_t(1) << i;\n"
+ " if (i > gl_SubGroupInvocationARB && (value & mask) == 0)\n"
+ " temp = false;\n"
+ " if (i <= gl_SubGroupInvocationARB && (value & mask) != 0)\n"
+ " temp = false;\n"
+ " };\n";
+ break;
+ case MASKTYPE_LE:
+ body += " for (uint i = 0; i < gl_SubGroupSizeARB; i++) {\n"
+ " uint64_t mask = uint64_t(1) << i;\n"
+ " if (i <= gl_SubGroupInvocationARB && (value & mask) == 0)\n"
+ " temp = false;\n"
+ " if (i > gl_SubGroupInvocationARB && (value & mask) != 0)\n"
+ " temp = false;\n"
+ " };\n";
+ break;
+ case MASKTYPE_LT:
+ body += " for (uint i = 0; i < gl_SubGroupSizeARB; i++) {\n"
+ " uint64_t mask = uint64_t(1) << i;\n"
+ " if (i < gl_SubGroupInvocationARB && (value & mask) == 0)\n"
+ " temp = false;\n"
+ " if (i >= gl_SubGroupInvocationARB && (value & mask) != 0)\n"
+ " temp = false;\n"
+ " };\n";
+ break;
+ default:
+ TCU_THROW(InternalError, "Unknown mask type");
}
- bdy << "uint tempResult = temp ? 0xf : 0x2;\n";
- return bdy.str();
+ body += " uint tempResult = temp ? 0xf : 0x2;\n";
+ body += " tempRes = tempResult;\n";
+
+ return body;
}
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+string getExtHeader (const CaseDefinition&)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
-
- subgroups::setFragmentShaderFrameBuffer(programCollection);
+ return
+ "#extension GL_ARB_shader_ballot: enable\n"
+ "#extension GL_ARB_gpu_shader_int64: enable\n";
+}
- if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage)
- subgroups::setVertexShaderFrameBuffer(programCollection);
+vector<string> getPerStageHeadDeclarations (const CaseDefinition& caseDef)
+{
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ const bool fragment = (caseDef.shaderStage & VK_SHADER_STAGE_FRAGMENT_BIT) != 0;
+ vector<string> result (stageCount, string());
- std::string bdyStr = getBodySource(caseDef);
+ if (fragment)
+ result.reserve(result.size() + 1);
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- {
- std::ostringstream vertex;
- vertex << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- << "layout(location = 0) in highp vec4 in_position;\n"
- << "layout(location = 0) out float out_color;\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << bdyStr
- << " out_color = float(tempResult);\n"
- << " gl_Position = in_position;\n"
- << " gl_PointSize = 1.0f;\n"
- << "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
+ for (size_t i = 0; i < result.size(); ++i)
{
- std::ostringstream geometry;
-
- geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- << "layout(points) in;\n"
- << "layout(points, max_vertices = 1) out;\n"
- << "layout(location = 0) out float out_color;\n"
- << "void main (void)\n"
- << "{\n"
- << bdyStr
- << " out_color = float(tempResult);\n"
- << " gl_Position = gl_in[0].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << " EmitVertex();\n"
- << " EndPrimitive();\n"
- << "}\n";
-
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- {
- std::ostringstream controlSource;
-
- controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- << "layout(vertices = 2) out;\n"
- << "layout(location = 0) out float out_color[];\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " if (gl_InvocationID == 0)\n"
- << " {\n"
- << " gl_TessLevelOuter[0] = 1.0f;\n"
- << " gl_TessLevelOuter[1] = 1.0f;\n"
- << " }\n"
- << bdyStr
- << " out_color[gl_InvocationID ] = float(tempResult);\n"
- << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "")
- << "}\n";
-
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
- subgroups::setTesEvalShaderFrameBuffer(programCollection);
- }
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- {
- std::ostringstream evaluationSource;
- evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- << "layout(isolines, equal_spacing, ccw ) in;\n"
- << "layout(location = 0) out float out_color;\n"
- << "void main (void)\n"
- << "{\n"
- << bdyStr
- << " out_color = float(tempResult);\n"
- << " gl_Position = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << "}\n";
-
- subgroups::setTesCtrlShaderFrameBuffer(programCollection);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
+ result[i] =
+ "layout(set = 0, binding = " + de::toString(i) + ", std430) buffer Buffer1\n"
+ "{\n"
+ " uint result[];\n"
+ "};\n";
}
- else
+
+ if (fragment)
{
- DE_FATAL("Unsupported shader stage");
+ const string fragPart =
+ "layout(location = 0) out uint result;\n";
+
+ result.push_back(fragPart);
}
+
+ return result;
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+vector<string> getFramebufferPerStageHeadDeclarations (const CaseDefinition& caseDef)
{
- std::string bdyStr = getBodySource(caseDef);
+ vector<string> result;
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- std::ostringstream src;
-
- src << "#version 450\n"
- << "#extension GL_ARB_shader_ballot: enable\n"
- << "#extension GL_ARB_gpu_shader_int64: enable\n"
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << bdyStr
- << " result[offset] = tempResult;\n"
- << "}\n";
-
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else
- {
- const string vertex =
- "#version 450\n"
- "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_VertexIndex] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
-
- const string tesc =
- "#version 450\n"
- "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveID] = tempResult;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "") +
- "}\n";
-
- const string tese =
- "#version 450\n"
- "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- "}\n";
-
- const string geometry =
- "#version 450\n"
- "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveIDIn] = tempResult;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
-
- const string fragment =
- "#version 450\n"
- "#extension GL_ARB_shader_ballot: enable\n"
- "#extension GL_ARB_gpu_shader_int64: enable\n"
- "layout(location = 0) out uint result;\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result = tempResult;\n"
- "}\n";
-
- subgroups::addNoSubgroupShader(programCollection);
-
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+ DE_UNREF(caseDef);
+
+ result.push_back("layout(location = 0) out float result;\n");
+ result.push_back("layout(location = 0) out float out_color;\n");
+ result.push_back("layout(location = 0) out float out_color[];\n");
+ result.push_back("layout(location = 0) out float out_color;\n");
+
+ return result;
+}
+
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getBodySource(caseDef);
+ const vector<string> headDeclarations = getFramebufferPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupported = *caseDef.geometryPointSizeSupported;
+
+ subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupported, extHeader, testSrc, "", headDeclarations);
+}
+
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getBodySource(caseDef);
+ const vector<string> headDeclarations = getPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupport = *caseDef.geometryPointSizeSupported;
+
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupport, extHeader, testSrc, "", headDeclarations);
}
void supportedCheck (Context& context, CaseDefinition caseDef)
{
- DE_UNREF(caseDef);
if (!subgroups::isSubgroupSupported(context))
TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
-
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
+ switch (caseDef.shaderStage)
{
- if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
}
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else if ((VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) & caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
}
-tcu::TestStatus test (Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0u, DE_NULL, checkCompute,
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0u, DE_NULL, checkCompute,
size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if ( VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
- {
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
return subgroups::allStages(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, stages);
}
- return tcu::TestStatus::pass("OK");
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBallotMasksTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsBallotMasksTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "VK_EXT_shader_subgroup_ballot masks category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "VK_EXT_shader_subgroup_ballot masks category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "VK_EXT_shader_subgroup_ballot masks category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "ballot_mask", "VK_EXT_shader_subgroup_ballot mask category tests"));
+ de::MovePtr<TestCaseGroup> groupARB (new TestCaseGroup(testCtx, "ext_shader_subgroup_ballot", "VK_EXT_shader_subgroup_ballot masks category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "VK_EXT_shader_subgroup_ballot masks category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "VK_EXT_shader_subgroup_ballot masks category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "VK_EXT_shader_subgroup_ballot masks category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "VK_EXT_shader_subgroup_ballot masks category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
+
for (int maskTypeIndex = 0; maskTypeIndex < MASKTYPE_LAST; ++maskTypeIndex)
{
- const string mask = de::toLower(getMaskTypeName(maskTypeIndex));
+ const MaskType maskType = static_cast<MaskType>(maskTypeIndex);
+ const string mask = de::toLower(getMaskTypeName(maskType));
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
{
- CaseDefinition caseDef = {maskTypeIndex, VK_SHADER_STAGE_COMPUTE_BIT, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), mask, "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), mask + "_requiredsubgroupsize", "", supportedCheck, initPrograms, test, caseDef);
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = mask + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ maskType, // MaskType maskType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize, // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
}
{
- const CaseDefinition caseDef = {maskTypeIndex, VK_SHADER_STAGE_ALL_GRAPHICS, de::SharedPtr<bool>(new bool), DE_FALSE};
+ const CaseDefinition caseDef =
+ {
+ maskType, // MaskType maskType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
addFunctionCaseWithPrograms(graphicGroup.get(), mask, "", supportedCheck, initPrograms, test, caseDef);
}
+ {
+ const CaseDefinition caseDef =
+ {
+ maskType, // MaskType maskType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), mask, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
- const CaseDefinition caseDef = {maskTypeIndex, stages[stageIndex], de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(), mask + "_" + getShaderStageName(caseDef.shaderStage), "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const CaseDefinition caseDef =
+ {
+ maskType, // MaskType maskType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = mask + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
}
}
- de::MovePtr<tcu::TestCaseGroup> groupARB(new tcu::TestCaseGroup(
- testCtx, "ext_shader_subgroup_ballot", "VK_EXT_shader_subgroup_ballot masks category tests"));
-
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "ballot_mask", "VK_EXT_shader_subgroup_ballot mask category tests"));
-
groupARB->addChild(graphicGroup.release());
groupARB->addChild(computeGroup.release());
groupARB->addChild(framebufferGroup.release());
+ groupARB->addChild(raytracingGroup.release());
group->addChild(groupARB.release());
return group.release();
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBallotMasksTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsBallotMasksTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
OPTYPE_LAST
};
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+struct CaseDefinition
+{
+ OpType opType;
+ VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool requiredSubgroupSize;
+};
+
+bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 0xf);
+
+ return subgroups::check(datas, width, 0xf);
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 0xf);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 0xf);
}
-std::string getOpTypeName(int opType)
+string getOpTypeName (OpType opType)
{
switch (opType)
{
- default:
- DE_FATAL("Unsupported op type");
- return "";
- case OPTYPE_INVERSE_BALLOT:
- return "subgroupInverseBallot";
- case OPTYPE_BALLOT_BIT_EXTRACT:
- return "subgroupBallotBitExtract";
- case OPTYPE_BALLOT_BIT_COUNT:
- return "subgroupBallotBitCount";
- case OPTYPE_BALLOT_INCLUSIVE_BIT_COUNT:
- return "subgroupBallotInclusiveBitCount";
- case OPTYPE_BALLOT_EXCLUSIVE_BIT_COUNT:
- return "subgroupBallotExclusiveBitCount";
- case OPTYPE_BALLOT_FIND_LSB:
- return "subgroupBallotFindLSB";
- case OPTYPE_BALLOT_FIND_MSB:
- return "subgroupBallotFindMSB";
+ case OPTYPE_INVERSE_BALLOT: return "subgroupInverseBallot";
+ case OPTYPE_BALLOT_BIT_EXTRACT: return "subgroupBallotBitExtract";
+ case OPTYPE_BALLOT_BIT_COUNT: return "subgroupBallotBitCount";
+ case OPTYPE_BALLOT_INCLUSIVE_BIT_COUNT: return "subgroupBallotInclusiveBitCount";
+ case OPTYPE_BALLOT_EXCLUSIVE_BIT_COUNT: return "subgroupBallotExclusiveBitCount";
+ case OPTYPE_BALLOT_FIND_LSB: return "subgroupBallotFindLSB";
+ case OPTYPE_BALLOT_FIND_MSB: return "subgroupBallotFindMSB";
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-struct CaseDefinition
+string getExtHeader (const CaseDefinition&)
{
- int opType;
- VkShaderStageFlags shaderStage;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
-};
+ return "#extension GL_KHR_shader_subgroup_ballot: enable\n";
+}
-std::string getBodySource(CaseDefinition caseDef)
+vector<string> getPerStageHeadDeclarations (const CaseDefinition& caseDef)
{
- std::ostringstream bdy;
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ const bool fragment = (caseDef.shaderStage & VK_SHADER_STAGE_FRAGMENT_BIT) != 0;
+ vector<string> result (stageCount, string());
+
+ if (fragment)
+ result.reserve(result.size() + 1);
+
+ for (size_t i = 0; i < result.size(); ++i)
+ {
+ result[i] =
+ "layout(set = 0, binding = " + de::toString(i) + ", std430) buffer Buffer1\n"
+ "{\n"
+ " uint result[];\n"
+ "};\n";
+ }
+
+ if (fragment)
+ {
+ const string fragPart =
+ "layout(location = 0) out uint result;\n"
+ "precision highp int;\n";
+
+ result.push_back(fragPart);
+ }
+
+ return result;
+}
+
+vector<string> getFramebufferPerStageHeadDeclarations (const CaseDefinition& caseDef)
+{
+ vector<string> result;
+
+ DE_UNREF(caseDef);
+
+ result.push_back("layout(location = 0) out float result;\n");
+ result.push_back("layout(location = 0) out float out_color;\n");
+ result.push_back("layout(location = 0) out float out_color[];\n");
+ result.push_back("layout(location = 0) out float out_color;\n");
+
+ return result;
+}
+
+string getTestString (const CaseDefinition& caseDef)
+{
+ ostringstream bdy;
bdy << " uvec4 allOnes = uvec4(0xFFFFFFFF);\n"
<< " uvec4 allZeros = uvec4(0);\n"
<< " }\n";
break;
}
- return bdy.str();
-}
-
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
-{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- subgroups::setFragmentShaderFrameBuffer(programCollection);
+ bdy << " tempRes = tempResult;\n";
- if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage)
- subgroups::setVertexShaderFrameBuffer(programCollection);
+ return bdy.str();
+}
- std::string bdyStr = getBodySource(caseDef);
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestString(caseDef);
+ const vector<string> headDeclarations = getFramebufferPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupported = *caseDef.geometryPointSizeSupported;
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- {
- std::ostringstream vertex;
- vertex << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << "layout(location = 0) in highp vec4 in_position;\n"
- << "layout(location = 0) out float out_color;\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << bdyStr
- << " out_color = float(tempResult);\n"
- << " gl_Position = in_position;\n"
- << " gl_PointSize = 1.0f;\n"
- << "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- {
- std::ostringstream geometry;
-
- geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << "layout(points) in;\n"
- << "layout(points, max_vertices = 1) out;\n"
- << "layout(location = 0) out float out_color;\n"
- << "void main (void)\n"
- << "{\n"
- << bdyStr
- << " out_color = float(tempResult);\n"
- << " gl_Position = gl_in[0].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << " EmitVertex();\n"
- << " EndPrimitive();\n"
- << "}\n";
-
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- {
- std::ostringstream controlSource;
-
- controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << "layout(vertices = 2) out;\n"
- << "layout(location = 0) out float out_color[];\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " if (gl_InvocationID == 0)\n"
- << " {\n"
- << " gl_TessLevelOuter[0] = 1.0f;\n"
- << " gl_TessLevelOuter[1] = 1.0f;\n"
- << " }\n"
- << bdyStr
- << " out_color[gl_InvocationID ] = float(tempResult);\n"
- << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "")
- << "}\n";
-
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
- subgroups::setTesEvalShaderFrameBuffer(programCollection);
- }
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- {
- std::ostringstream evaluationSource;
- evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << "layout(isolines, equal_spacing, ccw ) in;\n"
- << "layout(location = 0) out float out_color;\n"
- << "void main (void)\n"
- << "{\n"
- << bdyStr
- << " out_color = float(tempResult);\n"
- << " gl_Position = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << "}\n";
-
- subgroups::setTesCtrlShaderFrameBuffer(programCollection);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
- }
- else
- {
- DE_FATAL("Unsupported shader stage");
- }
+ subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupported, extHeader, testSrc, "", headDeclarations);
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- std::string bdyStr = getBodySource(caseDef);
-
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- std::ostringstream src;
-
- src << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << bdyStr
- << " result[offset] = tempResult;\n"
- << "}\n";
-
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else
- {
- const string vertex =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_VertexIndex] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
-
- const string tesc =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveID] = tempResult;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "") +
- "}\n";
-
- const string tese =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- "}\n";
-
- const string geometry =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveIDIn] = tempResult;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
-
- const string fragment =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(location = 0) out uint result;\n"
- "precision highp int;\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result = tempResult;\n"
- "}\n";
-
- subgroups::addNoSubgroupShader(programCollection);
-
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestString(caseDef);
+ const vector<string> headDeclarations = getPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupported = *caseDef.geometryPointSizeSupported;
+
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupported, extHeader, testSrc, "", headDeclarations);
}
void supportedCheck (Context& context, CaseDefinition caseDef)
{
- DE_UNREF(caseDef);
if (!subgroups::isSubgroupSupported(context))
TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
+ switch (caseDef.shaderStage)
{
- if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
}
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else if ((VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) & caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
}
-tcu::TestStatus test (Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0u, DE_NULL, checkCompute,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0u, DE_NULL, checkCompute,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
+
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if ( VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
- {
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
return subgroups::allStages(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, stages);
}
- return tcu::TestStatus::pass("OK");
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
+
+ return TestStatus::pass("OK");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBallotOtherTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsBallotOtherTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup ballot other category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup ballot other category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup ballot other category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "ballot_other", "Subgroup ballot other category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup ballot other category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup ballot other category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup ballot other category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup ballot other category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- const string op = de::toLower(getOpTypeName(opTypeIndex));
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string op = de::toLower(getOpTypeName(opType));
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
{
- CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_COMPUTE_BIT, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), op, "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), op + "_requiredsubgroupSize", "", supportedCheck, initPrograms, test, caseDef);
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = op + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
}
{
- const CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_ALL_GRAPHICS, de::SharedPtr<bool>(new bool), DE_FALSE};
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
addFunctionCaseWithPrograms(graphicGroup.get(), op, "", supportedCheck, initPrograms, test, caseDef);
}
+ {
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), op, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
- const CaseDefinition caseDef = {opTypeIndex, stages[stageIndex], de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(), op + "_" + getShaderStageName(caseDef.shaderStage), "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = op + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "ballot_other", "Subgroup ballot other category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBallotOtherTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsBallotOtherTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
namespace
{
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
-{
- DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 0x7);
-}
-
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
-{
- DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 0x7);
-}
-
struct CaseDefinition
{
VkShaderStageFlags shaderStage;
deBool requiredSubgroupSize;
};
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
+{
+ DE_UNREF(internalData);
+
+ return subgroups::check(datas, width, 0x7);
+}
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
- const vk::SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3);
- std::ostringstream subgroupSizeStr;
- subgroupSizeStr << subgroups::maxSupportedSubgroupSize();
- const string extensionHeader = (caseDef.extShaderSubGroupBallotTests ? "OpExtension \"SPV_KHR_shader_ballot\"\n" : "");
- const string capabilityBallotHeader = (caseDef.extShaderSubGroupBallotTests ? "OpCapability SubgroupBallotKHR\n" : "OpCapability GroupNonUniformBallot\n");
+ DE_UNREF(internalData);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 0x7);
+}
+
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
+ const string extensionHeader = (caseDef.extShaderSubGroupBallotTests ? "OpExtension \"SPV_KHR_shader_ballot\"\n" : "");
+ const string capabilityBallotHeader = (caseDef.extShaderSubGroupBallotTests ? "OpCapability SubgroupBallotKHR\n" : "OpCapability GroupNonUniformBallot\n");
+ const string subgroupSizeStr = de::toString(subgroups::maxSupportedSubgroupSize());
subgroups::setFragmentShaderFrameBuffer(programCollection);
"%21 = OpConstant %20 1\n"
"%22 = OpConstant %20 0\n"
"%27 = OpTypePointer Function %12\n"
- "%29 = OpConstant %6 " + subgroupSizeStr.str() + "\n"
+ "%29 = OpConstant %6 " + subgroupSizeStr + "\n"
"%30 = OpTypeArray %6 %29\n"
"%31 = OpTypeStruct %30\n"
"%32 = OpTypePointer Uniform %31\n"
" EndPrimitive();\n"
"}\n";
*/
- std::ostringstream geometry;
+ ostringstream geometry;
+
geometry
<< "; SPIR-V\n"
<< "; Version: 1.3\n"
<< "%21 = OpConstant %20 1\n"
<< "%22 = OpConstant %20 0\n"
<< "%27 = OpTypePointer Function %12\n"
- << "%29 = OpConstant %6 " << subgroupSizeStr.str() << "\n"
+ << "%29 = OpConstant %6 " << subgroupSizeStr << "\n"
<< "%30 = OpTypeArray %6 %29\n"
<< "%31 = OpTypeStruct %30\n"
<< "%32 = OpTypePointer Uniform %31\n"
"%32 = OpConstant %16 3\n"
"%34 = OpTypeVector %11 4\n"
"%42 = OpTypePointer Function %11\n"
- "%44 = OpConstant %16 " + subgroupSizeStr.str() + "\n"
+ "%44 = OpConstant %16 " + subgroupSizeStr + "\n"
"%45 = OpTypeArray %16 %44\n"
"%46 = OpTypeStruct %45\n"
"%47 = OpTypePointer Uniform %46\n"
"%21 = OpConstant %20 1\n"
"%22 = OpConstant %20 0\n"
"%27 = OpTypePointer Function %12\n"
- "%29 = OpConstant %6 " + subgroupSizeStr.str() + "\n"
+ "%29 = OpConstant %6 " + subgroupSizeStr + "\n"
"%30 = OpTypeArray %6 %29\n"
"%31 = OpTypeStruct %30\n"
"%32 = OpTypePointer Uniform %31\n"
}
}
-
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+string getExtHeader (const CaseDefinition& caseDef)
{
- const string extensionHeader = (caseDef.extShaderSubGroupBallotTests ?
+ return (caseDef.extShaderSubGroupBallotTests ?
"#extension GL_ARB_shader_ballot: enable\n"
"#extension GL_ARB_gpu_shader_int64: enable\n"
"#extension GL_KHR_shader_subgroup_basic: enable\n"
:
"#extension GL_KHR_shader_subgroup_ballot: enable\n");
+}
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- std::ostringstream src;
+string getBodySource (const CaseDefinition& caseDef)
+{
+ const string cmpStr = caseDef.extShaderSubGroupBallotTests ? "uint64_t(0) == ballotARB" : "uvec4(0) == subgroupBallot";
- src << "#version 450\n"
- << extensionHeader.c_str()
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "layout(set = 0, binding = 1, std430) buffer Buffer2\n"
- << "{\n"
- << " uint data[];\n"
- << "};\n"
- << "\n"
- << (caseDef.extShaderSubGroupBallotTests ? subgroups::getSharedMemoryBallotHelperARB() : subgroups::getSharedMemoryBallotHelper())
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << " uint tempResult = 0;\n"
- << " tempResult |= sharedMemoryBallot(true) == " << (caseDef.extShaderSubGroupBallotTests ? "ballotARB" : "subgroupBallot") << "(true) ? 0x1 : 0;\n"
- << " bool bData = data[gl_SubgroupInvocationID] != 0;\n"
- << " tempResult |= sharedMemoryBallot(bData) == " << (caseDef.extShaderSubGroupBallotTests ? "ballotARB" : "subgroupBallot") << "(bData) ? 0x2 : 0;\n"
- << " tempResult |= " << (caseDef.extShaderSubGroupBallotTests ? "uint64_t(0) == ballotARB" : "uvec4(0) == subgroupBallot") << "(false) ? 0x4 : 0;\n"
- << " result[offset] = tempResult;\n"
- << "}\n";
+ if (isAllComputeStages(caseDef.shaderStage))
+ {
+ const string cmpStrB = caseDef.extShaderSubGroupBallotTests ? "ballotARB" : "subgroupBallot";
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ return
+ " uint tempResult = 0;\n"
+ " tempResult |= sharedMemoryBallot(true) == " + cmpStrB + "(true) ? 0x1 : 0;\n"
+ " bool bData = data[gl_SubgroupInvocationID] != 0;\n"
+ " tempResult |= sharedMemoryBallot(bData) == " + cmpStrB + "(bData) ? 0x2 : 0;\n"
+ " tempResult |= " + cmpStr + "(false) ? 0x4 : 0;\n"
+ " tempRes = tempResult;\n";
}
else
{
- const string cmpStr = (caseDef.extShaderSubGroupBallotTests ? "uint64_t(0) == ballotARB" : "uvec4(0) == subgroupBallot");
- const string testSrc =
+ return
" uint tempResult = 0;\n"
" tempResult |= !bool(" + cmpStr + "(true)) ? 0x1 : 0;\n"
" bool bData = data[gl_SubgroupInvocationID] != 0;\n"
" tempResult |= !bool(" + cmpStr + "(bData)) ? 0x2 : 0;\n"
- " tempResult |= " + cmpStr + "(false) ? 0x4 : 0;\n";
-
- const string vertex =
- "#version 450\n"
- + extensionHeader +
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " uint data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + testSrc +
- " result[gl_VertexIndex] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
-
- const string tesc =
- "#version 450\n"
- + extensionHeader +
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " uint data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + testSrc +
- " result[gl_PrimitiveID] = tempResult;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "" ) +
- "}\n";
-
- const string tese =
- "#version 450\n"
- + extensionHeader +
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " uint data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + testSrc +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "" ) +
- "}\n";
-
- const string geometry =
- "#version 450\n"
- + extensionHeader +
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " uint data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + testSrc +
- " result[gl_PrimitiveIDIn] = tempResult;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "" ) +
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
-
- const string fragment =
- "#version 450\n"
- + extensionHeader +
- "layout(location = 0) out uint result;\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer1\n"
- "{\n"
- " uint data[];\n"
- "};\n"
- "void main (void)\n"
- "{\n"
- + testSrc +
- " result = tempResult;\n"
- "}\n";
-
- subgroups::addNoSubgroupShader(programCollection);
-
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ " tempResult |= " + cmpStr + "(false) ? 0x4 : 0;\n"
+ " tempRes = tempResult;\n";
}
}
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getBodySource(caseDef);
+ const string testHelper = !isAllComputeStages(caseDef.shaderStage) ? ""
+ : caseDef.extShaderSubGroupBallotTests ? subgroups::getSharedMemoryBallotHelperARB()
+ : subgroups::getSharedMemoryBallotHelper();
+ const bool pointSizeSupport = *caseDef.geometryPointSizeSupported;
+
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupport, extHeader, testSrc, testHelper);
+}
+
void supportedCheck (Context& context, CaseDefinition caseDef)
{
if (!subgroups::isSubgroupSupported(context))
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
+ const subgroups::SSBOData inputData =
{
- if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
-
- subgroups::SSBOData inputData[1];
- inputData[0].format = VK_FORMAT_R32_UINT;
- inputData[0].layout = subgroups::SSBOData::LayoutStd140;
- inputData[0].numElements = subgroups::maxSupportedSubgroupSize();
- inputData[0].initializeType = subgroups::SSBOData::InitializeNonZero;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+ const subgroups::SSBOData inputData =
{
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- subgroups::SSBOData inputData[1];
- inputData[0].format = VK_FORMAT_R32_UINT;
- inputData[0].layout = subgroups::SSBOData::LayoutStd430;
- inputData[0].numElements = subgroups::maxSupportedSubgroupSize();
- inputData[0].initializeType = subgroups::SSBOData::InitializeNonZero;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
if (caseDef.requiredSubgroupSize == DE_FALSE)
- return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, inputData, 1, DE_NULL, checkCompute);
+ return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, inputData, 1, DE_NULL, checkCompute,
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if ( VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- subgroups::SSBOData inputData;
- inputData.format = VK_FORMAT_R32_UINT;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
- inputData.binding = 4u;
- inputData.stages = stages;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 4u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 6u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBallotTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsBallotTests(TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup ballot category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup ballot category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup ballot category tests: framebuffer"));
-
- de::MovePtr<tcu::TestCaseGroup> graphicGroupEXT(new tcu::TestCaseGroup(
- testCtx, "graphics", "VK_EXT_shader_subgroups_ballot category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroupEXT(new tcu::TestCaseGroup(
- testCtx, "compute", "VK_EXT_shader_subgroups_ballot category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroupEXT(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "VK_EXT_shader_subgroups_ballot category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "ballot", "Subgroup ballot category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup ballot category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup ballot category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup ballot category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup ballot category tests: ray tracing"));
+ de::MovePtr<TestCaseGroup> groupEXT (new TestCaseGroup(testCtx, "ext_shader_subgroup_ballot", "VK_EXT_shader_subgroups_ballot category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroupEXT (new TestCaseGroup(testCtx, "graphics", "VK_EXT_shader_subgroups_ballot category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroupEXT (new TestCaseGroup(testCtx, "compute", "VK_EXT_shader_subgroups_ballot category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroupEXT (new TestCaseGroup(testCtx, "framebuffer", "VK_EXT_shader_subgroups_ballot category tests: framebuffer"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
- VK_SHADER_STAGE_VERTEX_BIT
+ VK_SHADER_STAGE_VERTEX_BIT,
+ };
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
};
-
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
{
- CaseDefinition caseDef = {VK_SHADER_STAGE_COMPUTE_BIT, de::SharedPtr<bool>(new bool), DE_FALSE, DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), getShaderStageName(caseDef.shaderStage), "", supportedCheck, initPrograms, test, caseDef);
- caseDef.extShaderSubGroupBallotTests = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroupEXT.get(), getShaderStageName(caseDef.shaderStage), "", supportedCheck, initPrograms, test, caseDef);
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testNameSuffix = requiredSubgroupSize ? "_requiredsubgroupsize" : "";
- caseDef.requiredSubgroupSize = DE_TRUE;
- caseDef.extShaderSubGroupBallotTests = DE_FALSE;
- addFunctionCaseWithPrograms(computeGroup.get(), getShaderStageName(caseDef.shaderStage) + "_requiredsubgroupsize", "", supportedCheck, initPrograms, test, caseDef);
- caseDef.extShaderSubGroupBallotTests = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroupEXT.get(), getShaderStageName(caseDef.shaderStage) + "_requiredsubgroupsize", "", supportedCheck, initPrograms, test, caseDef);
+ for (size_t extNdx = 0; extNdx < DE_LENGTH_OF_ARRAY(boolValues); ++extNdx)
+ {
+ const deBool extShaderSubGroupBallotTests = boolValues[extNdx];
+ TestCaseGroup* testGroup = extShaderSubGroupBallotTests ? computeGroupEXT.get() : computeGroup.get();
+ {
+ const CaseDefinition caseDef =
+ {
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ extShaderSubGroupBallotTests, // deBool extShaderSubGroupBallotTests;
+ requiredSubgroupSize, // deBool requiredSubgroupSize;
+ };
+ const string testName = getShaderStageName(caseDef.shaderStage) + testNameSuffix;
+
+ addFunctionCaseWithPrograms(testGroup, testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
+ }
}
+ for (size_t extNdx = 0; extNdx < DE_LENGTH_OF_ARRAY(boolValues); ++extNdx)
{
- CaseDefinition caseDef = {VK_SHADER_STAGE_ALL_GRAPHICS, de::SharedPtr<bool>(new bool), DE_FALSE, DE_FALSE};
- addFunctionCaseWithPrograms(graphicGroup.get(), "graphic", "", supportedCheck, initPrograms, test, caseDef);
- caseDef.extShaderSubGroupBallotTests = DE_TRUE;
- addFunctionCaseWithPrograms(graphicGroupEXT.get(), "graphic", "", supportedCheck, initPrograms, test, caseDef);
+ const deBool extShaderSubGroupBallotTests = boolValues[extNdx];
+ TestCaseGroup* testGroup = extShaderSubGroupBallotTests ? graphicGroupEXT.get() : graphicGroup.get();
+ const CaseDefinition caseDef =
+ {
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ extShaderSubGroupBallotTests, // deBool extShaderSubGroupBallotTests;
+ DE_FALSE, // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(testGroup, "graphic", "", supportedCheck, initPrograms, test, caseDef);
}
- for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
- CaseDefinition caseDef = {stages[stageIndex],de::SharedPtr<bool>(new bool), DE_FALSE, DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(), getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
- caseDef.extShaderSubGroupBallotTests = DE_TRUE;
- addFunctionCaseWithPrograms(framebufferGroupEXT.get(), getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const CaseDefinition caseDef =
+ {
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE, // deBool extShaderSubGroupBallotTests;
+ DE_FALSE, // deBool requiredSubgroupSize;
+ };
+ addFunctionCaseWithPrograms(raytracingGroup.get(), "test", "", supportedCheck, initPrograms, test, caseDef);
}
- de::MovePtr<tcu::TestCaseGroup> groupEXT(new tcu::TestCaseGroup(
- testCtx, "ext_shader_subgroup_ballot", "VK_EXT_shader_subgroups_ballot category tests"));
+ for (size_t extNdx = 0; extNdx < DE_LENGTH_OF_ARRAY(boolValues); ++extNdx)
+ {
+ const deBool extShaderSubGroupBallotTests = boolValues[extNdx];
+ TestCaseGroup* testGroup = extShaderSubGroupBallotTests ? framebufferGroupEXT.get() : framebufferGroup.get();
+
+ for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ {
+ const CaseDefinition caseDef =
+ {
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ extShaderSubGroupBallotTests, // deBool extShaderSubGroupBallotTests;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(testGroup, getShaderStageName(caseDef.shaderStage), "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ }
+ }
groupEXT->addChild(graphicGroupEXT.release());
groupEXT->addChild(computeGroupEXT.release());
groupEXT->addChild(framebufferGroupEXT.release());
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "ballot", "Subgroup ballot category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
-
+ group->addChild(raytracingGroup.release());
group->addChild(groupEXT.release());
return group.release();
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBallotTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsBallotTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
namespace
{
-static const deUint32 ELECTED_VALUE = 42u;
-static const deUint32 UNELECTED_VALUE = 13u;
-static const vk::VkDeviceSize SHADER_BUFFER_SIZE = 4096ull; // min(maxUniformBufferRange, maxImageDimension1D)
+// Which subgroup "basic" built-in each test case exercises; values map
+// one-to-one onto the GLSL function names returned by getOpTypeName().
+enum OpType
+{
+	OPTYPE_ELECT = 0,						// subgroupElect()
+	OPTYPE_SUBGROUP_BARRIER,				// subgroupBarrier()
+	OPTYPE_SUBGROUP_MEMORY_BARRIER,			// subgroupMemoryBarrier()
+	OPTYPE_SUBGROUP_MEMORY_BARRIER_BUFFER,	// subgroupMemoryBarrierBuffer()
+	OPTYPE_SUBGROUP_MEMORY_BARRIER_SHARED,	// subgroupMemoryBarrierShared()
+	OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE,	// subgroupMemoryBarrierImage()
+	OPTYPE_LAST								// sentinel: count of valid op types
+};
-static bool _checkFragmentSubgroupBarriersNoSSBO(std::vector<const void*> datas,
- deUint32 width, deUint32 height, bool withImage)
+// Parameters describing one generated test case instance.
+struct CaseDefinition
+{
+	OpType				opType;						// which basic operation the generated shaders invoke
+	VkShaderStageFlags	shaderStage;				// stage (or stage set) under test
+	de::SharedPtr<bool>	geometryPointSizeSupported;	// shared flag; gates gl_PointSize writes in generated shaders
+	deBool				requiredSubgroupSize;		// presumably selects the VK_EXT_subgroup_size_control path ("_requiredsubgroupsize" variants) — confirm against test instance code
+};
+
+static const deUint32 ELECTED_VALUE = 42u;
+static const deUint32 UNELECTED_VALUE = 13u;
+static const VkDeviceSize SHADER_BUFFER_SIZE = 4096ull; // min(maxUniformBufferRange, maxImageDimension1D)
+
+static bool _checkFragmentSubgroupBarriersNoSSBO (vector<const void*> datas,
+ deUint32 width,
+ deUint32 height,
+ bool withImage)
{
const float* const resultData = reinterpret_cast<const float*>(datas[0]);
for (deUint32 y = 0u; y < height; ++y)
{
const deUint32 ndx = (x * height + y) * 4u;
+
if (!withImage && 0.0f == resultData[ndx])
{
return false;
return true;
}
-static bool checkFragmentSubgroupBarriersNoSSBO(const void *internalData, std::vector<const void*> datas,
- deUint32 width, deUint32 height, deUint32)
+static bool checkFragmentSubgroupBarriersNoSSBO (const void *internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32 height,
+ deUint32)
{
DE_UNREF(internalData);
+
return _checkFragmentSubgroupBarriersNoSSBO(datas, width, height, false);
}
-static bool checkFragmentSubgroupBarriersWithImageNoSSBO(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32 height, deUint32)
+static bool checkFragmentSubgroupBarriersWithImageNoSSBO (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32 height,
+ deUint32)
{
DE_UNREF(internalData);
+
return _checkFragmentSubgroupBarriersNoSSBO(datas, width, height, true);
}
-static bool checkVertexPipelineStagesSubgroupElectNoSSBO(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+static bool checkVertexPipelineStagesSubgroupElectNoSSBO (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
+
const float* const resultData = reinterpret_cast<const float*>(datas[0]);
float poisonValuesFound = 0.0f;
float numSubgroupsUsed = 0.0f;
break;
}
}
+
return numSubgroupsUsed == poisonValuesFound;
}
-static bool checkVertexPipelineStagesSubgroupElect(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32, bool multipleCallsPossible)
+static bool checkVertexPipelineStagesSubgroupElect (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32,
+ bool multipleCallsPossible)
{
DE_UNREF(internalData);
- const deUint32* const resultData =
- reinterpret_cast<const deUint32*>(datas[0]);
- deUint32 poisonValuesFound = 0;
+
+ const deUint32* const resultData = reinterpret_cast<const deUint32*>(datas[0]);
+ deUint32 poisonValuesFound = 0;
for (deUint32 x = 0; x < width; ++x)
{
}
// we used an atomicly incremented counter to note how many subgroups we used for the vertex shader
- const deUint32 numSubgroupsUsed =
- *reinterpret_cast<const deUint32*>(datas[1]);
+ const deUint32 numSubgroupsUsed = *reinterpret_cast<const deUint32*>(datas[1]);
return (multipleCallsPossible ? (numSubgroupsUsed >= poisonValuesFound) : (numSubgroupsUsed == poisonValuesFound));
}
-static bool checkVertexPipelineStagesSubgroupBarriers(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+static bool checkVertexPipelineStagesSubgroupBarriers (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
+
const deUint32* const resultData = reinterpret_cast<const deUint32*>(datas[0]);
// We used this SSBO to generate our unique value!
return true;
}
-static bool _checkVertexPipelineStagesSubgroupBarriersNoSSBO(std::vector<const void*> datas,
- deUint32 width, bool withImage)
+static bool _checkVertexPipelineStagesSubgroupBarriersNoSSBO (vector<const void*> datas,
+ deUint32 width,
+ bool withImage)
{
const float* const resultData = reinterpret_cast<const float*>(datas[0]);
return false;
}
}
+
return true;
}
-static bool checkVertexPipelineStagesSubgroupBarriersNoSSBO(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+static bool checkVertexPipelineStagesSubgroupBarriersNoSSBO (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
+
return _checkVertexPipelineStagesSubgroupBarriersNoSSBO(datas, width, false);
}
-static bool checkVertexPipelineStagesSubgroupBarriersWithImageNoSSBO(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+static bool checkVertexPipelineStagesSubgroupBarriersWithImageNoSSBO (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
+
return _checkVertexPipelineStagesSubgroupBarriersNoSSBO(datas, width, true);
}
-static bool _checkTessellationEvaluationSubgroupBarriersNoSSBO(std::vector<const void*> datas,
- deUint32 width, deUint32, bool withImage)
+static bool _checkTessellationEvaluationSubgroupBarriersNoSSBO (vector<const void*> datas,
+ deUint32 width,
+ deUint32,
+ bool withImage)
{
const float* const resultData = reinterpret_cast<const float*>(datas[0]);
for (deUint32 x = 0u; x < width; ++x)
{
const deUint32 ndx = x*4u;
+
if (!withImage && 0.0f == resultData[ndx])
{
return false;
return false;
}
}
+
return true;
}
-static bool checkTessellationEvaluationSubgroupBarriersWithImageNoSSBO(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32 height)
+static bool checkTessellationEvaluationSubgroupBarriersWithImageNoSSBO (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32 height)
{
DE_UNREF(internalData);
+
return _checkTessellationEvaluationSubgroupBarriersNoSSBO(datas, width, height, true);
}
-static bool checkTessellationEvaluationSubgroupBarriersNoSSBO(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32 height)
+static bool checkTessellationEvaluationSubgroupBarriersNoSSBO (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32 height)
{
DE_UNREF(internalData);
- return _checkTessellationEvaluationSubgroupBarriersNoSSBO(datas, width, height, false);
+
+ return _checkTessellationEvaluationSubgroupBarriersNoSSBO (datas, width, height, false);
}
-static bool checkComputeSubgroupElect(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkComputeSubgroupElect (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
}
-static bool checkComputeSubgroupBarriers(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkComputeSubgroupBarriers (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
+
// We used this SSBO to generate our unique value!
const deUint32 ref = *reinterpret_cast<const deUint32*>(datas[2]);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, ref);
-}
-enum OpType
-{
- OPTYPE_ELECT = 0,
- OPTYPE_SUBGROUP_BARRIER,
- OPTYPE_SUBGROUP_MEMORY_BARRIER,
- OPTYPE_SUBGROUP_MEMORY_BARRIER_BUFFER,
- OPTYPE_SUBGROUP_MEMORY_BARRIER_SHARED,
- OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE,
- OPTYPE_LAST
-};
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, ref);
+}
-std::string getOpTypeName(int opType)
+string getOpTypeName (OpType opType)
{
switch (opType)
{
- default:
- DE_FATAL("Unsupported op type");
- return "";
- case OPTYPE_ELECT:
- return "subgroupElect";
- case OPTYPE_SUBGROUP_BARRIER:
- return "subgroupBarrier";
- case OPTYPE_SUBGROUP_MEMORY_BARRIER:
- return "subgroupMemoryBarrier";
- case OPTYPE_SUBGROUP_MEMORY_BARRIER_BUFFER:
- return "subgroupMemoryBarrierBuffer";
- case OPTYPE_SUBGROUP_MEMORY_BARRIER_SHARED:
- return "subgroupMemoryBarrierShared";
- case OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE:
- return "subgroupMemoryBarrierImage";
+ case OPTYPE_ELECT: return "subgroupElect";
+ case OPTYPE_SUBGROUP_BARRIER: return "subgroupBarrier";
+ case OPTYPE_SUBGROUP_MEMORY_BARRIER: return "subgroupMemoryBarrier";
+ case OPTYPE_SUBGROUP_MEMORY_BARRIER_BUFFER: return "subgroupMemoryBarrierBuffer";
+ case OPTYPE_SUBGROUP_MEMORY_BARRIER_SHARED: return "subgroupMemoryBarrierShared";
+ case OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE: return "subgroupMemoryBarrierImage";
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-struct CaseDefinition
-{
- int opType;
- VkShaderStageFlags shaderStage;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
-};
-
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- const vk::SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3);
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
- if(VK_SHADER_STAGE_FRAGMENT_BIT != caseDef.shaderStage)
+ if (VK_SHADER_STAGE_FRAGMENT_BIT != caseDef.shaderStage)
{
/*
"layout(location = 0) in vec4 in_color;\n"
"OpStore %9 %12\n"
"OpReturn\n"
"OpFunctionEnd\n";
+
programCollection.spirvAsmSources.add("fragment") << fragment;
}
if (VK_SHADER_STAGE_FRAGMENT_BIT == caseDef.shaderStage)
"OpStore %45 %38\n"
"OpReturn\n"
"OpFunctionEnd\n";
+
programCollection.spirvAsmSources.add("vert") << vertex;
}
else if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage)
+ {
subgroups::setVertexShaderFrameBuffer(programCollection);
+ }
if (OPTYPE_ELECT == caseDef.opType)
{
- std::ostringstream electedValue ;
- std::ostringstream unelectedValue;
+ ostringstream electedValue ;
+ ostringstream unelectedValue;
+
electedValue << ELECTED_VALUE;
unelectedValue << UNELECTED_VALUE;
"OpStore %39 %20\n"
"OpReturn\n"
"OpFunctionEnd\n";
+
programCollection.spirvAsmSources.add("vert") << vertex << buildOptionsSpr;
}
else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
" EndPrimitive();\n"
"}\n";
*/
- std::ostringstream geometry;
+ ostringstream geometry;
+
geometry
<< "; SPIR-V\n"
<< "; Version: 1.3\n"
<< "OpEndPrimitive\n"
<< "OpReturn\n"
<< "OpFunctionEnd\n";
+
programCollection.spirvAsmSources.add("geometry") << geometry.str() << buildOptionsSpr;
}
else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
"OpStore %45 %43\n"
"OpReturn\n"
"OpFunctionEnd\n";
+
programCollection.spirvAsmSources.add("tesc") << controlSource << buildOptionsSpr;
/*
"OpStore %65 %63\n"
"OpReturn\n"
"OpFunctionEnd\n";
+
programCollection.spirvAsmSources.add("tesc") << controlSource << buildOptionsSpr;
/*
"OpStore %38 %43\n"
"OpReturn\n"
"OpFunctionEnd\n";
+
programCollection.spirvAsmSources.add("tese") << evaluationSource << buildOptionsSpr;
}
else
- {
- DE_FATAL("Unsupported shader stage");
- }
+ TCU_THROW(InternalError, "Unsupported shader stage");
}
else
{
- std::ostringstream bdy;
- string color = (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage) ? "out_color[gl_InvocationID].b = 1.0f;\n" : "out_color.b = 1.0f;\n";
+ const string color = (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage) ? "out_color[gl_InvocationID].b = 1.0f;\n" : "out_color.b = 1.0f;\n";
+ ostringstream bdy;
+
switch (caseDef.opType)
{
- default:
- DE_FATAL("Unhandled op type!");
- break;
case OPTYPE_SUBGROUP_BARRIER:
case OPTYPE_SUBGROUP_MEMORY_BARRIER:
case OPTYPE_SUBGROUP_MEMORY_BARRIER_BUFFER:
<< " " << getOpTypeName(caseDef.opType) << "();\n";
break;
}
+
case OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE:
bdy <<"tempResult2 = imageLoad(tempImage, ivec2(id, 0)).x;\n"
<< " if (subgroupElect())\n"
<< " tempResult = imageLoad(tempImage, ivec2(id, 0)).x;\n"
<< " }\n"
<< " subgroupMemoryBarrierImage();\n";
-
break;
+
+ default:
+ TCU_THROW(InternalError, "Unhandled op type");
}
if (VK_SHADER_STAGE_FRAGMENT_BIT == caseDef.shaderStage)
{
- std::ostringstream fragment;
+ ostringstream fragment;
+
fragment << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< "#extension GL_KHR_shader_subgroup_ballot: enable\n"
<< " out_color.g = float(value);\n"
<< " out_color.a = float(tempResult2);\n"
<< "}\n";
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment.str()) << buildOptions;
+
+ programCollection.glslSources.add("fragment") << glu::FragmentSource(fragment.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
{
- std::ostringstream vertex;
+ ostringstream vertex;
+
vertex << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< "#extension GL_KHR_shader_subgroup_ballot: enable\n"
<< " gl_Position = in_position;\n"
<< " gl_PointSize = 1.0f;\n"
<< "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex.str()) << buildOptions;
+
+ programCollection.glslSources.add("vert") << glu::VertexSource(vertex.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
{
- std::ostringstream geometry;
+ ostringstream geometry;
geometry << "#version 450\n"
<< "#extension GL_KHR_shader_subgroup_ballot: enable\n"
<< " EndPrimitive();\n"
<< "}\n";
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
+ programCollection.glslSources.add("geometry") << glu::GeometrySource(geometry.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
{
- std::ostringstream controlSource;
- std::ostringstream evaluationSource;
+ ostringstream controlSource;
+ ostringstream evaluationSource;
controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "#extension GL_EXT_tessellation_shader : require\n"
<< (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "" )
<< "}\n";
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
+ programCollection.glslSources.add("tesc") << glu::TessellationControlSource(controlSource.str()) << buildOptions;
+ programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
{
- std::ostringstream controlSource;
- std::ostringstream evaluationSource;
+ ostringstream controlSource;
+ ostringstream evaluationSource;
controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< " out_color = in_color[0];\n"
<< "}\n";
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
+ programCollection.glslSources.add("tesc") << glu::TessellationControlSource(controlSource.str()) << buildOptions;
+ programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
}
else
- {
- DE_FATAL("Unsupported shader stage");
- }
+ TCU_THROW(InternalError, "Unsupported shader stage");
}
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+vector<string> getPerStageHeadDeclarations (const CaseDefinition& caseDef)
{
- if (OPTYPE_ELECT == caseDef.opType)
- {
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- std::ostringstream src;
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ const bool fragment = (caseDef.shaderStage & VK_SHADER_STAGE_FRAGMENT_BIT) != 0;
+ vector<string> result (stageCount, string());
- src << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_basic: enable\n"
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "\n"
- << subgroups::getSharedMemoryBallotHelper()
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << " uint value = " << UNELECTED_VALUE << ";\n"
- << " if (subgroupElect())\n"
- << " {\n"
- << " value = " << ELECTED_VALUE << ";\n"
- << " }\n"
- << " uvec4 bits = bitCount(sharedMemoryBallot(value == " << ELECTED_VALUE << "));\n"
- << " result[offset] = bits.x + bits.y + bits.z + bits.w;\n"
- << "}\n";
+ if (fragment)
+ result.resize(result.size() + 1);
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else
+ if (caseDef.opType == OPTYPE_ELECT)
+ {
+ for (size_t i = 0; i < result.size(); ++i)
{
- std::ostringstream testSrc;
- testSrc << " uint tempRes;\n"
- << " if (subgroupElect())\n"
- << " {\n"
- << " tempRes = " << ELECTED_VALUE << ";\n"
- << " atomicAdd(numSubgroupsExecuted, 1);\n"
- << " }\n"
- << " else\n"
- << " {\n"
- << " tempRes = " << UNELECTED_VALUE << ";\n"
- << " }\n";
+ const bool frag = (i == stageCount);
+ const size_t binding1 = i;
+ const size_t binding2 = stageCount + i;
+ if (frag)
{
- std::ostringstream vertex;
- vertex << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_basic: enable\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "layout(set = 0, binding = 4, std430) buffer Buffer2\n"
- << "{\n"
- << " uint numSubgroupsExecuted;\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << testSrc.str()
- << " result[gl_VertexIndex] = tempRes;\n"
- << " float pixelSize = 2.0f/1024.0f;\n"
- << " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- << " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- << " gl_PointSize = 1.0f;\n"
- << "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ result[i] += "layout(location = 0) out uint result;\n";
}
-
+ else
{
- std::ostringstream tesc;
- tesc << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_basic: enable\n"
- << "layout(vertices=1) out;\n"
- << "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "layout(set = 0, binding = 5, std430) buffer Buffer2\n"
- << "{\n"
- << " uint numSubgroupsExecuted;\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << testSrc.str()
- << " result[gl_PrimitiveID] = tempRes;\n"
- << " if (gl_InvocationID == 0)\n"
- << " {\n"
- << " gl_TessLevelOuter[0] = 1.0f;\n"
- << " gl_TessLevelOuter[1] = 1.0f;\n"
- << " }\n"
- << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[0].gl_PointSize;\n" : "" )
- << "}\n";
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ result[i] +=
+ "layout(set = 0, binding = " + de::toString(binding1) + ", std430) buffer Buffer1\n"
+ "{\n"
+ " uint result[];\n"
+ "};\n";
}
+ result[i] +=
+ "layout(set = 0, binding = " + de::toString(binding2) + ", std430) buffer Buffer2\n"
+ "{\n"
+ " uint numSubgroupsExecuted;\n"
+ "};\n";
+ }
+ }
+ else
+ {
+ for (size_t i = 0; i < result.size(); ++i)
+ {
+ const bool frag = (i == stageCount);
+ const size_t binding1 = i;
+ const size_t binding2 = stageCount + 4 * i;
+ const size_t binding3 = stageCount + 4 * i + 1;
+ const size_t binding4 = stageCount + 4 * i + 2;
+ const size_t binding5 = stageCount + 4 * i + 3;
+
+ if (frag)
{
- std::ostringstream tese;
- tese << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_basic: enable\n"
- << "layout(isolines) in;\n"
- << "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "layout(set = 0, binding = 6, std430) buffer Buffer2\n"
- << "{\n"
- << " uint numSubgroupsExecuted;\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << testSrc.str()
- << " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempRes;\n"
- << " float pixelSize = 2.0f/1024.0f;\n"
- << " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "" )
- << "}\n";
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ result[i] = "layout(location = 0) out uint result;\n";
}
-
+ else
{
- std::ostringstream geometry;
- geometry << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_basic: enable\n"
- << "layout(${TOPOLOGY}) in;\n"
- << "layout(points, max_vertices = 1) out;\n"
- << "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "layout(set = 0, binding = 7, std430) buffer Buffer2\n"
- << "{\n"
- << " uint numSubgroupsExecuted;\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << testSrc.str()
- << " result[gl_PrimitiveIDIn] = tempRes;\n"
- << " gl_Position = gl_in[0].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "" )
- << " EmitVertex();\n"
- << " EndPrimitive();\n"
- << "}\n";
- subgroups::addGeometryShadersFromTemplate(geometry.str(), vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
+ result[i] +=
+ "layout(set = 0, binding = " + de::toString(binding1) + ", std430) buffer Buffer1\n"
+ "{\n"
+ " uint result[];\n"
+ "};\n";
}
- {
- std::ostringstream fragment;
- fragment << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_basic: enable\n"
- << "layout(location = 0) out uint data;\n"
- << "layout(set = 0, binding = 8, std430) buffer Buffer\n"
- << "{\n"
- << " uint numSubgroupsExecuted;\n"
- << "};\n"
- << "void main (void)\n"
- << "{\n"
- << " if (gl_HelperInvocation) return;\n"
- << testSrc.str()
- << " data = tempRes;\n"
- << "}\n";
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment.str())<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- subgroups::addNoSubgroupShader(programCollection);
+ result[i] +=
+ "layout(set = 0, binding = " + de::toString(binding2) + ", std430) buffer Buffer2\n"
+ "{\n"
+ " uint tempBuffer[];\n"
+ "};\n"
+ "layout(set = 0, binding = " + de::toString(binding3) + ", std430) buffer Buffer3\n"
+ "{\n"
+ " uint subgroupID;\n"
+ "};\n"
+ "layout(set = 0, binding = " + de::toString(binding4) + ", std430) buffer Buffer4\n"
+ "{\n"
+ " uint value;\n"
+ "};\n"
+ "layout(set = 0, binding = " + de::toString(binding5) + ", r32ui) uniform uimage2D tempImage;\n";
}
}
- else
- {
- std::ostringstream bdy;
- switch (caseDef.opType)
- {
- default:
- DE_FATAL("Unhandled op type!");
- break;
- case OPTYPE_SUBGROUP_BARRIER:
- case OPTYPE_SUBGROUP_MEMORY_BARRIER:
- case OPTYPE_SUBGROUP_MEMORY_BARRIER_BUFFER:
- bdy << " if (subgroupElect())\n"
- << " {\n"
- << " tempBuffer[id] = value;\n"
- << " }\n"
- << " " << getOpTypeName(caseDef.opType) << "();\n"
- << " tempResult = tempBuffer[id];\n";
- break;
- case OPTYPE_SUBGROUP_MEMORY_BARRIER_SHARED:
- bdy << " if (subgroupElect())\n"
- << " {\n"
- << " tempShared[localId] = value;\n"
- << " }\n"
- << " subgroupMemoryBarrierShared();\n"
- << " tempResult = tempShared[localId];\n";
- break;
- case OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE:
- bdy << " if (subgroupElect())\n"
- << " {\n"
- << " imageStore(tempImage, ivec2(id, 0), ivec4(value));\n"
- << " }\n"
- << " subgroupMemoryBarrierImage();\n"
- << " tempResult = imageLoad(tempImage, ivec2(id, 0)).x;\n";
- break;
- }
+ return result;
+}
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- std::ostringstream src;
+string getTestString (const CaseDefinition& caseDef)
+{
+ stringstream body;
- src << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_basic: enable\n"
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "layout(set = 0, binding = 1, std430) buffer Buffer2\n"
- << "{\n"
- << " uint tempBuffer[];\n"
- << "};\n"
- << "layout(set = 0, binding = 2, std430) buffer Buffer3\n"
- << "{\n"
- << " uint value;\n"
- << "};\n"
- << "layout(set = 0, binding = 3, r32ui) uniform uimage2D tempImage;\n"
- << "shared uint tempShared[gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z];\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << " uint localId = gl_SubgroupID;\n"
- << " uint id = globalSize.x * ((globalSize.y * "
- "gl_WorkGroupID.z) + gl_WorkGroupID.y) + "
- "gl_WorkGroupID.x + localId;\n"
- << " uint tempResult = 0;\n"
- << bdy.str()
- << " result[offset] = tempResult;\n"
- << "}\n";
+ if (caseDef.opType != OPTYPE_ELECT && (isAllGraphicsStages(caseDef.shaderStage) || isAllRayTracingStages(caseDef.shaderStage)))
+ {
+ body << " uint id = 0;\n"
+ " if (subgroupElect())\n"
+ " {\n"
+ " id = atomicAdd(subgroupID, 1);\n"
+ " }\n"
+ " id = subgroupBroadcastFirst(id);\n"
+ " uint localId = id;\n"
+ " uint tempResult = 0;\n";
+ }
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else
- {
+ switch (caseDef.opType)
+ {
+ case OPTYPE_ELECT:
+ if (isAllComputeStages(caseDef.shaderStage))
{
- const string vertex =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) buffer Buffer2\n"
- "{\n"
- " uint tempBuffer[];\n"
- "};\n"
- "layout(set = 0, binding = 5, std430) buffer Buffer3\n"
- "{\n"
- " uint subgroupID;\n"
- "};\n"
- "layout(set = 0, binding = 6, std430) buffer Buffer4\n"
- "{\n"
- " uint value;\n"
- "};\n"
- "layout(set = 0, binding = 7, r32ui) uniform uimage2D tempImage;\n"
- "void main (void)\n"
- "{\n"
- " uint id = 0;\n"
- " if (subgroupElect())\n"
- " {\n"
- " id = atomicAdd(subgroupID, 1);\n"
- " }\n"
- " id = subgroupBroadcastFirst(id);\n"
- " uint localId = id;\n"
- " uint tempResult = 0;\n"
- + bdy.str() +
- " result[gl_VertexIndex] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ body << " uint value = " << UNELECTED_VALUE << ";\n"
+ " if (subgroupElect())\n"
+ " {\n"
+ " value = " << ELECTED_VALUE << ";\n"
+ " }\n"
+ " uvec4 bits = bitCount(sharedMemoryBallot(value == " << ELECTED_VALUE << "));\n"
+ " tempRes = bits.x + bits.y + bits.z + bits.w;\n";
}
-
+ else
{
- const string tesc =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 8, std430) buffer Buffer2\n"
- "{\n"
- " uint tempBuffer[];\n"
- "};\n"
- "layout(set = 0, binding = 9, std430) buffer Buffer3\n"
- "{\n"
- " uint subgroupID;\n"
- "};\n"
- "layout(set = 0, binding = 10, std430) buffer Buffer4\n"
- "{\n"
- " uint value;\n"
- "};\n"
- "layout(set = 0, binding = 11, r32ui) uniform uimage2D tempImage;\n"
- "void main (void)\n"
- "{\n"
- " uint id = 0;\n"
- " if (subgroupElect())\n"
- " {\n"
- " id = atomicAdd(subgroupID, 1);\n"
- " }\n"
- " id = subgroupBroadcastFirst(id);\n"
- " uint localId = id;\n"
- " uint tempResult = 0;\n"
- + bdy.str() +
- " result[gl_PrimitiveID] = tempResult;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "" )
- + "}\n";
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ body << " if (subgroupElect())\n"
+ " {\n"
+ " tempRes = " << ELECTED_VALUE << ";\n"
+ " atomicAdd(numSubgroupsExecuted, 1);\n"
+ " }\n"
+ " else\n"
+ " {\n"
+ " tempRes = " << UNELECTED_VALUE << ";\n"
+ " }\n";
}
+ break;
- {
- const string tese =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 12, std430) buffer Buffer2\n"
- "{\n"
- " uint tempBuffer[];\n"
- "};\n"
- "layout(set = 0, binding = 13, std430) buffer Buffer3\n"
- "{\n"
- " uint subgroupID;\n"
- "};\n"
- "layout(set = 0, binding = 14, std430) buffer Buffer4\n"
- "{\n"
- " uint value;\n"
- "};\n"
- "layout(set = 0, binding = 15, r32ui) uniform uimage2D tempImage;\n"
- "void main (void)\n"
- "{\n"
- " uint id = 0;\n"
- " if (subgroupElect())\n"
+ case OPTYPE_SUBGROUP_BARRIER:
+ case OPTYPE_SUBGROUP_MEMORY_BARRIER:
+ case OPTYPE_SUBGROUP_MEMORY_BARRIER_BUFFER:
+ body << " if (subgroupElect())\n"
" {\n"
- " id = atomicAdd(subgroupID, 1);\n"
+ " tempBuffer[id] = value;\n"
" }\n"
- " id = subgroupBroadcastFirst(id);\n"
- " uint localId = id;\n"
- " uint tempResult = 0;\n"
- + bdy.str() +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "" )
- + "}\n";
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+ " " << getOpTypeName(caseDef.opType) << "();\n"
+ " tempResult = tempBuffer[id];\n";
+ break;
- {
- const string geometry =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 16, std430) buffer Buffer2\n"
- "{\n"
- " uint tempBuffer[];\n"
- "};\n"
- "layout(set = 0, binding = 17, std430) buffer Buffer3\n"
- "{\n"
- " uint subgroupID;\n"
- "};\n"
- "layout(set = 0, binding = 18, std430) buffer Buffer4\n"
- "{\n"
- " uint value;\n"
- "};\n"
- "layout(set = 0, binding = 19, r32ui) uniform uimage2D tempImage;\n"
- "void main (void)\n"
- "{\n"
- " uint id = 0;\n"
- " if (subgroupElect())\n"
+ case OPTYPE_SUBGROUP_MEMORY_BARRIER_SHARED:
+ body << " if (subgroupElect())\n"
" {\n"
- " id = atomicAdd(subgroupID, 1);\n"
+ " tempShared[localId] = value;\n"
" }\n"
- " id = subgroupBroadcastFirst(id);\n"
- " uint localId = id;\n"
- " uint tempResult = 0;\n"
- + bdy.str() +
- " result[gl_PrimitiveIDIn] = tempResult;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "" ) +
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- }
+ " subgroupMemoryBarrierShared();\n"
+ " tempResult = tempShared[localId];\n";
+ break;
- {
- const string fragment =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(location = 0) out uint result;\n"
- "layout(set = 0, binding = 20, std430) buffer Buffer1\n"
- "{\n"
- " uint tempBuffer[];\n"
- "};\n"
- "layout(set = 0, binding = 21, std430) buffer Buffer2\n"
- "{\n"
- " uint subgroupID;\n"
- "};\n"
- "layout(set = 0, binding = 22, std430) buffer Buffer3\n"
- "{\n"
- " uint value;\n"
- "};\n"
- "layout(set = 0, binding = 23, r32ui) uniform uimage2D tempImage;\n"
- "void main (void)\n"
- "{\n"
- " if (gl_HelperInvocation) return;\n"
- " uint id = 0;\n"
- " if (subgroupElect())\n"
+ case OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE:
+ body << " if (subgroupElect())\n"
" {\n"
- " id = atomicAdd(subgroupID, 1);\n"
+ " imageStore(tempImage, ivec2(id, 0), ivec4(value));\n"
" }\n"
- " id = subgroupBroadcastFirst(id);\n"
- " uint localId = id;\n"
- " uint tempResult = 0;\n"
- + bdy.str() +
- " result = tempResult;\n"
- "}\n";
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+ " subgroupMemoryBarrierImage();\n"
+ " tempResult = imageLoad(tempImage, ivec2(id, 0)).x;\n";
+ break;
- subgroups::addNoSubgroupShader(programCollection);
- }
+ default:
+ TCU_THROW(InternalError, "Unhandled op type!");
+ }
+
+ if (caseDef.opType != OPTYPE_ELECT && (isAllGraphicsStages(caseDef.shaderStage) || isAllRayTracingStages(caseDef.shaderStage)))
+ {
+ body << " tempRes = tempResult;\n";
+ }
+
+ return body.str();
+}
+
+string getExtHeader (const CaseDefinition& caseDef)
+{
+ const string extensions = (caseDef.opType == OPTYPE_ELECT)
+ ? "#extension GL_KHR_shader_subgroup_basic: enable\n"
+ : "#extension GL_KHR_shader_subgroup_basic: enable\n"
+ "#extension GL_KHR_shader_subgroup_ballot: enable\n";
+ return extensions;
+}
+
+void initComputePrograms (SourceCollections& programCollection,
+ CaseDefinition& caseDef,
+ const string& extensions,
+ const string& testSrc,
+ const ShaderBuildOptions& buildOptions)
+{
+ if (OPTYPE_ELECT == caseDef.opType)
+ {
+ ostringstream src;
+
+ src << "#version 450\n"
+ << extensions
+ << "layout (local_size_x_id = 0, local_size_y_id = 1, "
+ "local_size_z_id = 2) in;\n"
+ << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
+ << "{\n"
+ << " uint result[];\n"
+ << "};\n"
+ << "\n"
+ << subgroups::getSharedMemoryBallotHelper()
+ << "void main (void)\n"
+ << "{\n"
+ << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
+ << " highp uint offset = globalSize.x * ((globalSize.y * "
+ "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
+ "gl_GlobalInvocationID.x;\n"
+ << " uint value = " << UNELECTED_VALUE << ";\n"
+ << " if (subgroupElect())\n"
+ << " {\n"
+ << " value = " << ELECTED_VALUE << ";\n"
+ << " }\n"
+ << " uvec4 bits = bitCount(sharedMemoryBallot(value == " << ELECTED_VALUE << "));\n"
+ << " result[offset] = bits.x + bits.y + bits.z + bits.w;\n"
+ << "}\n";
+
+ programCollection.glslSources.add("comp") << glu::ComputeSource(src.str()) << buildOptions;
+ }
+ else
+ {
+ ostringstream src;
+
+ src << "#version 450\n"
+ << "#extension GL_KHR_shader_subgroup_basic: enable\n"
+ << "layout (local_size_x_id = 0, local_size_y_id = 1, "
+ "local_size_z_id = 2) in;\n"
+ << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
+ << "{\n"
+ << " uint result[];\n"
+ << "};\n"
+ << "layout(set = 0, binding = 1, std430) buffer Buffer2\n"
+ << "{\n"
+ << " uint tempBuffer[];\n"
+ << "};\n"
+ << "layout(set = 0, binding = 2, std430) buffer Buffer3\n"
+ << "{\n"
+ << " uint value;\n"
+ << "};\n"
+ << "layout(set = 0, binding = 3, r32ui) uniform uimage2D tempImage;\n"
+ << "shared uint tempShared[gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z];\n"
+ << "\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
+ << " highp uint offset = globalSize.x * ((globalSize.y * "
+ "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
+ "gl_GlobalInvocationID.x;\n"
+ << " uint localId = gl_SubgroupID;\n"
+ << " uint id = globalSize.x * ((globalSize.y * "
+ "gl_WorkGroupID.z) + gl_WorkGroupID.y) + "
+ "gl_WorkGroupID.x + localId;\n"
+ << " uint tempResult = 0;\n"
+ << testSrc
+ << " result[offset] = tempResult;\n"
+ << "}\n";
+
+ programCollection.glslSources.add("comp") << glu::ComputeSource(src.str()) << buildOptions;
}
}
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestString(caseDef);
+ const vector<string> headDeclarations = getPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupport = *caseDef.geometryPointSizeSupported;
+
+ if (isAllComputeStages(caseDef.shaderStage))
+ initComputePrograms(programCollection, caseDef, extHeader, testSrc, buildOptions);
+ else
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupport, extHeader, testSrc, "", headDeclarations, true);
+}
+
void supportedCheck (Context& context, CaseDefinition caseDef)
{
- DE_UNREF(caseDef);
if (!subgroups::isSubgroupSupported(context))
TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
+ if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_BASIC_BIT))
+ TCU_FAIL("supportedOperations will have the VK_SUBGROUP_FEATURE_BASIC_BIT bit set if any of the physical device's queues support VK_QUEUE_GRAPHICS_BIT or VK_QUEUE_COMPUTE_BIT.");
+
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
-
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
-}
-
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
-{
- if (!subgroups::areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
- {
- if (subgroups::areSubgroupOperationsRequiredForStage(
- caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
-
- if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_BASIC_BIT))
- {
- return tcu::TestStatus::fail(
- "Subgroup feature " +
- subgroups::getSubgroupFeatureName(VK_SUBGROUP_FEATURE_BASIC_BIT) +
- " is a required capability!");
- }
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
if (OPTYPE_ELECT != caseDef.opType && VK_SHADER_STAGE_COMPUTE_BIT != caseDef.shaderStage)
{
TCU_THROW(NotSupportedError, "Subgroup basic operation non-compute stage test required that ballot operations are supported!");
}
}
+}
- const deUint32 inputDatasCount = OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE == caseDef.opType ? 3u : 2u;
- std::vector<subgroups::SSBOData> inputDatas (inputDatasCount);
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+{
+ const deUint32 inputDatasCount = OPTYPE_SUBGROUP_MEMORY_BARRIER_IMAGE == caseDef.opType ? 3u : 2u;
+ vector<subgroups::SSBOData> inputDatas (inputDatasCount);
inputDatas[0].format = VK_FORMAT_R32_UINT;
inputDatas[0].layout = subgroups::SSBOData::LayoutStd140;
caseDef.shaderStage);
}
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_BASIC_BIT))
- {
- return tcu::TestStatus::fail(
- "Subgroup feature " +
- subgroups::getSubgroupFeatureName(VK_SUBGROUP_FEATURE_BASIC_BIT) +
- " is a required capability!");
- }
-
- if (OPTYPE_ELECT != caseDef.opType && VK_SHADER_STAGE_COMPUTE_BIT != caseDef.shaderStage)
- {
- if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_BALLOT_BIT))
- {
- TCU_THROW(NotSupportedError, "Subgroup basic operation non-compute stage test required that ballot operations are supported!");
- }
- }
-
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
- {
- return tcu::TestStatus::fail("Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
if (OPTYPE_ELECT == caseDef.opType)
{
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupElect);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0u, DE_NULL, checkComputeSubgroupElect,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0u, DE_NULL, checkComputeSubgroupElect,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
else
{
- const deUint32 inputDatasCount = 3;
- subgroups::SSBOData inputDatas[inputDatasCount];
- inputDatas[0].format = VK_FORMAT_R32_UINT;
- inputDatas[0].layout = subgroups::SSBOData::LayoutStd430;
- inputDatas[0].numElements = SHADER_BUFFER_SIZE;
- inputDatas[0].initializeType = subgroups::SSBOData::InitializeNone;
-
- inputDatas[1].format = VK_FORMAT_R32_UINT;
- inputDatas[1].layout = subgroups::SSBOData::LayoutStd430;
- inputDatas[1].numElements = 1;
- inputDatas[1].initializeType = subgroups::SSBOData::InitializeNonZero;
-
- inputDatas[2].format = VK_FORMAT_R32_UINT;
- inputDatas[2].layout = subgroups::SSBOData::LayoutPacked;
- inputDatas[2].numElements = SHADER_BUFFER_SIZE;
- inputDatas[2].initializeType = subgroups::SSBOData::InitializeNone;
- inputDatas[2].isImage = true;
+ const deUint32 inputDatasCount = 3;
+ const subgroups::SSBOData inputDatas[inputDatasCount] =
+ {
+ {
+ subgroups::SSBOData::InitializeNone, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ SHADER_BUFFER_SIZE, // vk::VkDeviceSize numElements;
+ },
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ 1, // vk::VkDeviceSize numElements;
+ },
+ {
+ subgroups::SSBOData::InitializeNone, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutPacked, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ SHADER_BUFFER_SIZE, // vk::VkDeviceSize numElements;
+ true, // bool isImage;
+ },
+ };
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, inputDatas, inputDatasCount, DE_NULL, checkComputeSubgroupBarriers);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, inputDatas, inputDatasCount, DE_NULL, checkComputeSubgroupBarriers,
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, inputDatas, inputDatasCount, DE_NULL, checkComputeSubgroupBarriers,
size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
if (!subgroups::isFragmentSSBOSupportedForDevice(context))
{
TCU_THROW(NotSupportedError, "Subgroup basic operation require that the fragment stage be able to write to SSBOs!");
}
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if ( VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const VkShaderStageFlags stagesBits[] =
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
+ VK_SHADER_STAGE_VERTEX_BIT,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ VK_SHADER_STAGE_GEOMETRY_BIT,
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ };
if (OPTYPE_ELECT == caseDef.opType)
{
- const deUint32 inputCount = 5u;
- subgroups::SSBOData inputData[inputCount];
-
- inputData[0].format = VK_FORMAT_R32_UINT;
- inputData[0].layout = subgroups::SSBOData::LayoutStd430;
- inputData[0].numElements = 1;
- inputData[0].initializeType = subgroups::SSBOData::InitializeZero;
- inputData[0].binding = 4u;
- inputData[0].stages = VK_SHADER_STAGE_VERTEX_BIT;
-
- inputData[1].format = VK_FORMAT_R32_UINT;
- inputData[1].layout = subgroups::SSBOData::LayoutStd430;
- inputData[1].numElements = 1;
- inputData[1].initializeType = subgroups::SSBOData::InitializeZero;
- inputData[1].binding = 5u;
- inputData[1].stages = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;
-
- inputData[2].format = VK_FORMAT_R32_UINT;
- inputData[2].layout = subgroups::SSBOData::LayoutStd430;
- inputData[2].numElements = 1;
- inputData[2].initializeType = subgroups::SSBOData::InitializeZero;
- inputData[2].binding = 6u;
- inputData[2].stages = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT;
-
- inputData[3].format = VK_FORMAT_R32_UINT;
- inputData[3].layout = subgroups::SSBOData::LayoutStd430;
- inputData[3].numElements = 1;
- inputData[3].initializeType = subgroups::SSBOData::InitializeZero;
- inputData[3].binding = 7u;
- inputData[3].stages = VK_SHADER_STAGE_GEOMETRY_BIT;
-
- inputData[4].format = VK_FORMAT_R32_UINT;
- inputData[4].layout = subgroups::SSBOData::LayoutStd430;
- inputData[4].numElements = 1;
- inputData[4].initializeType = subgroups::SSBOData::InitializeZero;
- inputData[4].binding = 8u;
- inputData[4].stages = VK_SHADER_STAGE_FRAGMENT_BIT;
+ const deUint32 inputCount = DE_LENGTH_OF_ARRAY(stagesBits);
+ subgroups::SSBOData inputData[inputCount];
+
+ for (deUint32 ndx = 0; ndx < DE_LENGTH_OF_ARRAY(stagesBits); ++ndx)
+ {
+ inputData[ndx] =
+ {
+ subgroups::SSBOData::InitializeZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ 1, // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 4 + ndx, // deUint32 binding;
+ stagesBits[ndx], // vk::VkShaderStageFlags stages;
+ };
+ }
return subgroups::allStages(context, VK_FORMAT_R32_UINT, inputData, inputCount, DE_NULL, checkVertexPipelineStagesSubgroupElect, stages);
}
else
{
- const VkShaderStageFlagBits stagesBits[] =
- {
- VK_SHADER_STAGE_VERTEX_BIT,
- VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
- VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
- VK_SHADER_STAGE_GEOMETRY_BIT,
- VK_SHADER_STAGE_FRAGMENT_BIT,
- };
-
- const deUint32 inputDatasCount = DE_LENGTH_OF_ARRAY(stagesBits) * 4u;
- subgroups::SSBOData inputDatas[inputDatasCount];
+ const deUint32 inputDatasCount = DE_LENGTH_OF_ARRAY(stagesBits) * 4u;
+ subgroups::SSBOData inputDatas[inputDatasCount];
for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(stagesBits); ++ndx)
{
const deUint32 index = ndx*4;
+
inputDatas[index].format = VK_FORMAT_R32_UINT;
inputDatas[index].layout = subgroups::SSBOData::LayoutStd430;
inputDatas[index].numElements = SHADER_BUFFER_SIZE;
return subgroups::allStages(context, VK_FORMAT_R32_UINT, inputDatas, inputDatasCount, DE_NULL, checkVertexPipelineStagesSubgroupBarriers, stages);
}
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const VkShaderStageFlags stagesBits[] =
+ {
+ VK_SHADER_STAGE_RAYGEN_BIT_KHR,
+ VK_SHADER_STAGE_ANY_HIT_BIT_KHR,
+ VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR,
+ VK_SHADER_STAGE_MISS_BIT_KHR,
+ VK_SHADER_STAGE_INTERSECTION_BIT_KHR,
+ VK_SHADER_STAGE_CALLABLE_BIT_KHR,
+ };
+ const deUint32 stagesCount = DE_LENGTH_OF_ARRAY(stagesBits);
+
+ if (OPTYPE_ELECT == caseDef.opType)
+ {
+ const deUint32 inputDataCount = stagesCount;
+ subgroups::SSBOData inputData[inputDataCount];
+
+ for (deUint32 ndx = 0; ndx < inputDataCount; ++ndx)
+ {
+ inputData[ndx].format = VK_FORMAT_R32_UINT;
+ inputData[ndx].layout = subgroups::SSBOData::LayoutStd430;
+ inputData[ndx].numElements = 1;
+ inputData[ndx].initializeType = subgroups::SSBOData::InitializeZero;
+ inputData[ndx].binding = stagesCount + ndx;
+ inputData[ndx].stages = stagesBits[ndx];
+ }
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, inputData, inputDataCount, DE_NULL, checkVertexPipelineStagesSubgroupElect, stages);
+ }
+ else
+ {
+ const deUint32 datasPerStage = 4u;
+ const deUint32 inputDatasCount = datasPerStage * stagesCount;
+ subgroups::SSBOData inputDatas[inputDatasCount];
+
+ for (deUint32 ndx = 0; ndx < stagesCount; ++ndx)
+ {
+ const deUint32 index = datasPerStage * ndx;
+
+ for (deUint32 perStageNdx = 0; perStageNdx < datasPerStage; ++perStageNdx)
+ {
+ inputDatas[index + perStageNdx].format = VK_FORMAT_R32_UINT;
+ inputDatas[index + perStageNdx].layout = subgroups::SSBOData::LayoutStd430;
+ inputDatas[index + perStageNdx].stages = stagesBits[ndx];
+ inputDatas[index + perStageNdx].isImage = false;
+ }
+
+ inputDatas[index + 0].numElements = SHADER_BUFFER_SIZE;
+ inputDatas[index + 0].initializeType = subgroups::SSBOData::InitializeNonZero;
+ inputDatas[index + 0].binding = index + stagesCount;
+
+ inputDatas[index + 1].numElements = 1;
+ inputDatas[index + 1].initializeType = subgroups::SSBOData::InitializeZero;
+ inputDatas[index + 1].binding = index + stagesCount + 1u;
+
+ inputDatas[index + 2].numElements = 1;
+ inputDatas[index + 2].initializeType = subgroups::SSBOData::InitializeNonZero;
+ inputDatas[index + 2].binding = index + stagesCount + 2u;
+
+ inputDatas[index + 3].numElements = SHADER_BUFFER_SIZE;
+ inputDatas[index + 3].initializeType = subgroups::SSBOData::InitializeNone;
+ inputDatas[index + 3].isImage = true;
+ inputDatas[index + 3].binding = index + stagesCount + 3u;
+ }
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, inputDatas, inputDatasCount, DE_NULL, checkVertexPipelineStagesSubgroupBarriers, stages);
+ }
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBasicTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsBasicTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup basic category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup basic category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup basic category tests: framebuffer"));
-
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "basic", "Subgroup basic category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup basic category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup basic category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup basic category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup basic category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_FRAGMENT_BIT,
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- const std::string op = de::toLower(getOpTypeName(opTypeIndex));
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string op = de::toLower(getOpTypeName(opType));
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
{
- CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_COMPUTE_BIT, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), op, "",
- supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), op + "_requiredsubgroupsize", "",
- supportedCheck, initPrograms, test, caseDef);
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testNameSuffix = requiredSubgroupSize ? "_requiredsubgroupsize" : "";
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize // deBool requiredSubgroupSize;
+ };
+ const string testName = op + testNameSuffix;
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
}
- if (OPTYPE_SUBGROUP_MEMORY_BARRIER_SHARED == opTypeIndex)
+ if (OPTYPE_SUBGROUP_MEMORY_BARRIER_SHARED == opType)
{
// Shared isn't available in non compute shaders.
continue;
}
{
- const CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_ALL_GRAPHICS, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(graphicGroup.get(),
- op, "",
- supportedCheck, initPrograms, test, caseDef);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroup.get(), op, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ {
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), op, "", supportedCheck, initPrograms, test, caseDef);
}
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
- if (OPTYPE_ELECT == opTypeIndex && stageIndex == 0)
+ if (OPTYPE_ELECT == opType && stages[stageIndex] == VK_SHADER_STAGE_FRAGMENT_BIT)
continue; // This is not tested. I don't know why.
- const CaseDefinition caseDefFrag = {opTypeIndex, stages[stageIndex], de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(),
- op + "_" + getShaderStageName(caseDefFrag.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDefFrag);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = op + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "basic", "Subgroup basic category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBasicTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsBasicTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
namespace subgroups
{
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+enum TestType
{
- DE_UNREF(internalData);
- return check(datas, width, 1);
-}
+ TEST_TYPE_SUBGROUP_EQ_MASK = 0,
+ TEST_TYPE_SUBGROUP_GE_MASK = 1,
+ TEST_TYPE_SUBGROUP_GT_MASK = 2,
+ TEST_TYPE_SUBGROUP_LE_MASK = 3,
+ TEST_TYPE_SUBGROUP_LT_MASK = 4,
+ TEST_TYPE_LAST
+};
-static bool checkComputeStage(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+const char* TestTypeSpirvBuiltins[] =
{
- DE_UNREF(internalData);
- return checkCompute(datas, numWorkgroups, localSize, 1);
-}
+ "SubgroupEqMask",
+ "SubgroupGeMask",
+ "SubgroupGtMask",
+ "SubgroupLeMask",
+ "SubgroupLtMask",
+};
+DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(TestTypeSpirvBuiltins) == TEST_TYPE_LAST);
+
+const char* TestTypeMathOps[] =
+{
+ "==",
+ ">=",
+ ">",
+ "<=",
+ "<",
+};
+DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(TestTypeMathOps) == TEST_TYPE_LAST);
+
+const char* TestTypeSpirvOps[] =
+{
+ "OpIEqual",
+ "OpUGreaterThanEqual",
+ "OpUGreaterThan",
+ "OpULessThanEqual",
+ "OpULessThan",
+};
+DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(TestTypeSpirvOps) == TEST_TYPE_LAST);
namespace
{
struct CaseDefinition
{
- std::string varName;
+ TestType testType;
VkShaderStageFlags shaderStage;
de::SharedPtr<bool> geometryPointSizeSupported;
deBool requiredSubgroupSize;
};
}
-std::string subgroupComparison (const CaseDefinition& caseDef)
+static inline string getTestSpirvBuiltinName (TestType testType)
{
- if ("gl_SubgroupEqMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "%56 = OpIEqual %11 %53 %55\n";
- else
- return "%38 = OpIEqual %16 %35 %37\n";
- }
- else if ("gl_SubgroupGeMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "%56 = OpUGreaterThanEqual %11 %53 %55\n";
- else
- return "%38 = OpUGreaterThanEqual %16 %35 %37\n";
- }
- else if ("gl_SubgroupGtMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "%56 = OpUGreaterThan %11 %53 %55\n";
- else
- return "%38 = OpUGreaterThan %16 %35 %37\n";
- }
- else if ("gl_SubgroupLeMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "%56 = OpULessThanEqual %11 %53 %55\n";
- else
- return "%38 = OpULessThanEqual %16 %35 %37\n";
- }
- else if ("gl_SubgroupLtMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "%56 = OpULessThan %11 %53 %55\n";
- else
- return "%38 = OpULessThan %16 %35 %37\n";
- }
- return "";
+ return TestTypeSpirvBuiltins[static_cast<deUint32>(testType)];
}
-std::string varSubgroupMask (const CaseDefinition& caseDef)
+static inline string getTestName (TestType testType)
{
- if ("gl_SubgroupEqMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "OpDecorate %40 BuiltIn SubgroupEqMask\n";
- else
- return "OpDecorate %22 BuiltIn SubgroupEqMask\n";
- }
- else if ("gl_SubgroupGeMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "OpDecorate %40 BuiltIn SubgroupGeMask\n";
- else
- return "OpDecorate %22 BuiltIn SubgroupGeMask\n";
- }
- else if ("gl_SubgroupGtMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "OpDecorate %40 BuiltIn SubgroupGtMask\n";
- else
- return "OpDecorate %22 BuiltIn SubgroupGtMask\n";
- }
- else if ("gl_SubgroupLeMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "OpDecorate %40 BuiltIn SubgroupLeMask\n";
- else
- return "OpDecorate %22 BuiltIn SubgroupLeMask\n";
- }
- else if ("gl_SubgroupLtMask" == caseDef.varName)
- {
- if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return "OpDecorate %40 BuiltIn SubgroupLtMask\n";
- else
- return "OpDecorate %22 BuiltIn SubgroupLtMask\n";
- }
- return "";
+ return de::toLower(getTestSpirvBuiltinName(testType));
+}
+
+static inline string getTestVarName (TestType testType)
+{
+ return string("gl_") + getTestSpirvBuiltinName(testType);
+}
+
+static inline string getTestMathOp (TestType testType)
+{
+ return TestTypeMathOps[static_cast<deUint32>(testType)];
+}
+
+static inline string getTestSpirvOp (TestType testType)
+{
+ return TestTypeSpirvOps[static_cast<deUint32>(testType)];
+}
+
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
+{
+ DE_UNREF(internalData);
+
+ return check(datas, width, 1);
+}
+
+static bool checkComputeStage (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
+{
+ DE_UNREF(internalData);
+
+ return checkCompute(datas, numWorkgroups, localSize, 1);
+}
+
+static inline string subgroupComparison (const CaseDefinition& caseDef)
+{
+ const string spirvOp = getTestSpirvOp(caseDef.testType);
+ const string result = (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
+ ? "%56 = " + spirvOp + " %11 %53 %55\n"
+ : "%38 = " + spirvOp + " %16 %35 %37\n";
+
+ return result;
+}
+
+static inline string varSubgroupMask (const CaseDefinition& caseDef)
+{
+ const string spirvBuiltin = getTestSpirvBuiltinName(caseDef.testType);
+ const string result = (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
+ ? "OpDecorate %40 BuiltIn " + spirvBuiltin + "\n"
+ : "OpDecorate %22 BuiltIn " + spirvBuiltin + "\n";
+
+ return result;
}
-std::string subgroupMask (const CaseDefinition& caseDef)
+string subgroupMask (const CaseDefinition& caseDef)
{
- std::string comp;
- if (caseDef.varName == "gl_SubgroupEqMask")
- comp = "==";
- else if (caseDef.varName == "gl_SubgroupGeMask")
- comp = ">=";
- else if (caseDef.varName == "gl_SubgroupGtMask")
- comp = ">";
- else if (caseDef.varName == "gl_SubgroupLeMask")
- comp = "<=";
- else if (caseDef.varName == "gl_SubgroupLtMask")
- comp = "<";
-
- std::ostringstream bdy;
+ const string varName = getTestVarName(caseDef.testType);
+ const string comp = getTestMathOp(caseDef.testType);
+ ostringstream bdy;
bdy << " uint tempResult = 0x1;\n"
<< " uvec4 mask = subgroupBallot(true);\n"
- << " const uvec4 var = " << caseDef.varName << ";\n"
+ << " const uvec4 var = " << varName << ";\n"
<< " for (uint i = 0; i < gl_SubgroupSize; i++)\n"
<< " {\n"
<< " if ((i " << comp << " gl_SubgroupInvocationID) ^^ subgroupBallotBitExtract(var, i))\n"
<< " if (subgroupBallotBitCount(var) != c)\n"
<< " {\n"
<< " tempResult = 0;\n"
- << " }\n";
+ << " }\n"
+ << " tempRes = tempResult;\n";
+
return bdy.str();
}
void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3);
- const string comparison = subgroupComparison(caseDef);
- const string mask = varSubgroupMask(caseDef);
+ const SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
+ const string comparison = subgroupComparison(caseDef);
+ const string mask = varSubgroupMask(caseDef);
subgroups::setFragmentShaderFrameBuffer(programCollection);
<< glu::GeometrySource(geometry) << vk::ShaderBuildOptions(vk::SPIRV_VERSION_1_3, 0u);
*/
- std::ostringstream geometry;
+ ostringstream geometry;
geometry
<< "; SPIR-V\n"
<< "; Version: 1.3\n"
<< "OpEndPrimitive\n"
<< "OpReturn\n"
<< "OpFunctionEnd\n";
- programCollection.spirvAsmSources.add("geometry") << geometry.str() << buildOptionsSpr;
+
+ programCollection.spirvAsmSources.add("geometry") << geometry.str() << buildOptionsSpr;
}
else
{
}
}
+string getExtHeader (const CaseDefinition&)
+{
+ return "#extension GL_KHR_shader_subgroup_ballot: enable\n";
+}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+vector<string> getPerStageHeadDeclarations (const CaseDefinition& caseDef)
{
- const string bdy = subgroupMask(caseDef);
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ const bool fragment = (caseDef.shaderStage & VK_SHADER_STAGE_FRAGMENT_BIT) != 0;
+ vector<string> result (stageCount, string());
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (fragment)
+ result.reserve(result.size() + 1);
+
+ for (size_t i = 0; i < result.size(); ++i)
{
- std::ostringstream src;
-
- src << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Output\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << bdy
- << " result[offset] = tempResult;\n"
- << "}\n";
-
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ result[i] =
+ "layout(set = 0, binding = " + de::toString(i) + ", std430) buffer Output\n"
+ "{\n"
+ " uint result[];\n"
+ "};\n";
}
- else
- {
- {
- const string vertex =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(set = 0, binding = 0, std430) buffer Output\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdy +
- " result[gl_VertexIndex] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- {
- const string tesc =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Output\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdy +
- " result[gl_PrimitiveID] = tempResult;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "") +
- "}\n";
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const string tese =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Output\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdy +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- "}\n";
+ if (fragment)
+ {
+ const string fragPart =
+ "layout(location = 0) out uint result;\n";
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+ result.push_back(fragPart);
+ }
- {
- const string geometry =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Output\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdy +
- " result[gl_PrimitiveIDIn] = tempResult;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
-
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- }
+ return result;
+}
- {
- const string fragment =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- "layout(location = 0) out uint result;\n"
- "void main (void)\n"
- "{\n"
- + bdy +
- " result = tempResult;\n"
- "}\n";
-
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = subgroupMask(caseDef);
+ const vector<string> headDeclarations = getPerStageHeadDeclarations(caseDef);
- subgroups::addNoSubgroupShader(programCollection);
- }
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, true, extHeader, testSrc, "", headDeclarations);
}
void supportedCheck (Context& context, CaseDefinition caseDef)
{
- DE_UNREF(caseDef);
if (!subgroups::isSubgroupSupported(context))
TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
-}
-
-tcu::TestStatus noSSBOtest(Context& context, const CaseDefinition caseDef)
-{
- if (!areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
- {
- if (areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " + getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_BALLOT_BIT))
{
TCU_THROW(NotSupportedError, "Device does not support subgroup ballot operations");
}
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else if ((VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) & caseDef.shaderStage )
- return makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
-
- return makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest(Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_BALLOT_BIT))
+ switch (caseDef.shaderStage)
{
- TCU_THROW(NotSupportedError, "Device does not support subgroup ballot operations");
+ case VK_SHADER_STAGE_VERTEX_BIT: return makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
}
+}
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+TestStatus test(Context& context, const CaseDefinition caseDef)
+{
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " + getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
if (caseDef.requiredSubgroupSize == DE_FALSE)
return makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeStage);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeStage,
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeStage,
size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if ( VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
- {
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
return subgroups::allStages(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
-tcu::TestCaseGroup* createSubgroupsBuiltinMaskVarTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsBuiltinMaskVarTests (TestContext& testCtx)
{
-de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup builtin mask category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup builtin mask category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup builtin mask category tests: framebuffer"));
-
- const char* const all_stages_vars[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "builtin_mask_var", "Subgroup builtin mask variable tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup builtin mask category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup builtin mask category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup builtin mask category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup builtin mask category tests: ray tracing"));
+ const TestType allStagesBuiltinVars[] =
{
- "SubgroupEqMask",
- "SubgroupGeMask",
- "SubgroupGtMask",
- "SubgroupLeMask",
- "SubgroupLtMask",
+ TEST_TYPE_SUBGROUP_EQ_MASK,
+ TEST_TYPE_SUBGROUP_GE_MASK,
+ TEST_TYPE_SUBGROUP_GT_MASK,
+ TEST_TYPE_SUBGROUP_LE_MASK,
+ TEST_TYPE_SUBGROUP_LT_MASK,
};
-
- const VkShaderStageFlags stages[] =
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
-
- for (int a = 0; a < DE_LENGTH_OF_ARRAY(all_stages_vars); ++a)
+ for (int a = 0; a < DE_LENGTH_OF_ARRAY(allStagesBuiltinVars); ++a)
{
- const std::string var = all_stages_vars[a];
- const std::string varLower = de::toLower(var);
+ const TestType testType = allStagesBuiltinVars[a];
+ const string name = getTestName(testType);
+
+ {
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
{
- const CaseDefinition caseDef = {"gl_" + var, VK_SHADER_STAGE_ALL_GRAPHICS, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(graphicGroup.get(),
- varLower, "",
- supportedCheck, initPrograms, test, caseDef);
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
}
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
{
- CaseDefinition caseDef = {"gl_" + var, VK_SHADER_STAGE_COMPUTE_BIT, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(),
- varLower, "",
- supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(),
- varLower + "_requiredsubgroupsize", "",
- supportedCheck, initPrograms, test, caseDef);
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
}
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
- const CaseDefinition caseDef = {"gl_" + var, stages[stageIndex], de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(),
- varLower + "_" +
- getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "builtin_mask_var", "Subgroup builtin mask variable tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBuiltinMaskVarTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsBuiltinMaskVarTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
namespace subgroups
{
-bool checkVertexPipelineStagesSubgroupSize(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32 subgroupSize)
+enum TestType
+{
+ TEST_TYPE_SUBGROUP_SIZE = 0,
+ TEST_TYPE_SUBGROUP_INVOCATION_ID = 1,
+ TEST_TYPE_SUBGROUP_NUM_SUBGROUPS = 2,
+ TEST_TYPE_SUBGROUP_NUM_SUBGROUP_ID = 3,
+ TEST_TYPE_LAST
+};
+
+const char* TestTypeNames[] =
+{
+ "SubgroupSize",
+ "SubgroupInvocationID",
+ "NumSubgroups",
+ "SubgroupID",
+};
+DE_STATIC_ASSERT(DE_LENGTH_OF_ARRAY(TestTypeNames) == TEST_TYPE_LAST);
+
+const char* getTestName (TestType testType)
+{
+ return TestTypeNames[static_cast<deUint32>(testType)];
+}
+
+bool checkVertexPipelineStagesSubgroupSize (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32 subgroupSize)
{
DE_UNREF(internalData);
- const deUint32* data =
- reinterpret_cast<const deUint32*>(datas[0]);
+
+ const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
+
for (deUint32 x = 0; x < width; ++x)
{
deUint32 val = data[x * 4];
return true;
}
-bool checkVertexPipelineStagesSubgroupInvocationID(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32 subgroupSize)
+bool checkVertexPipelineStagesSubgroupInvocationID (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32 subgroupSize)
{
DE_UNREF(internalData);
- const deUint32* data =
- reinterpret_cast<const deUint32*>(datas[0]);
- vector<deUint32> subgroupInvocationHits(subgroupSize, 0);
+
+ const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
+ vector<deUint32> subgroupInvocationHits (subgroupSize, 0);
for (deUint32 x = 0; x < width; ++x)
{
- deUint32 subgroupInvocationID = data[(x * 4) + 1];
+ deUint32 subgroupInvocationID = data[(x * 4) + 1] - 1024u;
if (subgroupInvocationID >= subgroupSize)
return false;
return true;
}
-static bool checkComputeSubgroupSize(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32 subgroupSize)
+static bool checkComputeSubgroupSize (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32 subgroupSize)
{
DE_UNREF(internalData);
+
const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
for (deUint32 nX = 0; nX < numWorkgroups[0]; ++nX)
{
for (deUint32 lY = 0; lY < localSize[1]; ++lY)
{
- for (deUint32 lZ = 0; lZ < localSize[2];
- ++lZ)
+ for (deUint32 lZ = 0; lZ < localSize[2]; ++lZ)
{
const deUint32 globalInvocationX =
nX * localSize[0] + lX;
return true;
}
-static bool checkComputeSubgroupInvocationID(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32 subgroupSize)
+static bool checkComputeSubgroupInvocationID (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32 subgroupSize)
{
DE_UNREF(internalData);
+
const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
for (deUint32 nX = 0; nX < numWorkgroups[0]; ++nX)
return true;
}
-static bool checkComputeNumSubgroups (const void* internalData,
- std::vector<const void*> datas,
- const deUint32 numWorkgroups[3],
- const deUint32 localSize[3],
- deUint32)
+static bool checkComputeNumSubgroups (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
+
const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
for (deUint32 nX = 0; nX < numWorkgroups[0]; ++nX)
{
for (deUint32 lY = 0; lY < localSize[1]; ++lY)
{
- for (deUint32 lZ = 0; lZ < localSize[2];
- ++lZ)
+ for (deUint32 lZ = 0; lZ < localSize[2]; ++lZ)
{
const deUint32 globalInvocationX =
nX * localSize[0] + lX;
return true;
}
-static bool checkComputeSubgroupID (const void* internalData,
- std::vector<const void*> datas,
- const deUint32 numWorkgroups[3],
- const deUint32 localSize[3],
- deUint32)
+static bool checkComputeSubgroupID (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
{
struct CaseDefinition
{
- std::string varName;
+ TestType testType;
VkShaderStageFlags shaderStage;
de::SharedPtr<bool> geometryPointSizeSupported;
deBool requiredSubgroupSize;
void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- const vk::SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3);
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
{
/*
"\n"
"void main (void)\n"
"{\n"
- " out_color = vec4(gl_SubgroupSize, gl_SubgroupInvocationID, 1.0f, 1.0f);\n"
+ " out_color = vec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 1.0f, 1.0f);\n"
" gl_Position = in_position;\n"
" gl_PointSize = 1.0f;\n"
"}\n";
"%28 = OpVariable %27 Input\n"
"%31 = OpConstant %25 1\n"
"%32 = OpTypePointer Output %6\n"
+ "%99 = OpConstant %10 1024\n"
"%4 = OpFunction %2 None %3\n"
"%5 = OpLabel\n"
"%13 = OpLoad %10 %12\n"
"%14 = OpConvertUToF %6 %13\n"
- "%16 = OpLoad %10 %15\n"
+ "%98 = OpLoad %10 %15\n"
+ "%16 = OpIAdd %10 %98 %99\n"
"%17 = OpConvertUToF %6 %16\n"
"%19 = OpCompositeConstruct %7 %14 %17 %18 %18\n"
"OpStore %9 %19\n"
"void main (void)\n"
"{\n"
" gl_Position = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);\n"
- " out_color = vec4(gl_SubgroupSize, gl_SubgroupInvocationID, 0.0f, 0.0f);\n"
+ " out_color = vec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0.0f, 0.0f);\n"
"}\n";
*/
const string evaluationSource =
"%48 = OpTypeArray %7 %17\n"
"%49 = OpTypePointer Input %48\n"
"%50 = OpVariable %49 Input\n"
+ "%99 = OpConstant %8 1024\n"
"%4 = OpFunction %2 None %3\n"
"%5 = OpLabel\n"
"%22 = OpAccessChain %21 %20 %15 %15\n"
"OpStore %37 %35\n"
"%41 = OpLoad %8 %40\n"
"%42 = OpConvertUToF %6 %41\n"
- "%44 = OpLoad %8 %43\n"
+ "%98 = OpLoad %8 %43\n"
+ "%44 = OpIAdd %8 %98 %99\n"
"%45 = OpConvertUToF %6 %44\n"
"%47 = OpCompositeConstruct %7 %42 %45 %46 %46\n"
"OpStore %38 %47\n"
" gl_TessLevelOuter[0] = 1.0f;\n"
" gl_TessLevelOuter[1] = 1.0f;\n"
" }\n"
- " out_color[gl_InvocationID] = vec4(gl_SubgroupSize, gl_SubgroupInvocationID, 0, 0);\n"
+ " out_color[gl_InvocationID] = vec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0, 0);\n"
" gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
"}\n";
*/
"%53 = OpTypePointer Input %52\n"
"%54 = OpVariable %53 Input\n"
"%56 = OpTypePointer Input %26\n"
+ "%99 = OpConstant %16 1024\n"
"%4 = OpFunction %2 None %3\n"
"%5 = OpLabel\n"
"%9 = OpLoad %6 %8\n"
"%31 = OpLoad %6 %8\n"
"%34 = OpLoad %16 %33\n"
"%35 = OpConvertUToF %15 %34\n"
- "%37 = OpLoad %16 %36\n"
+ "%98 = OpLoad %16 %36\n"
+ "%37 = OpIAdd %16 %98 %99\n"
"%38 = OpConvertUToF %15 %37\n"
"%40 = OpCompositeConstruct %26 %35 %38 %39 %39\n"
"%42 = OpAccessChain %41 %30 %31\n"
"layout(location = 0) out vec4 out_color;\n"
"void main (void)\n"
"{\n"
- " out_color = vec4(gl_SubgroupSize, gl_SubgroupInvocationID, 0, 0);\n"
+ " out_color = vec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0, 0);\n"
" gl_Position = gl_in[0].gl_Position;\n"
" gl_PointSize = gl_in[0].gl_PointSize;\n"
" EmitVertex();\n"
" EndPrimitive();\n"
"}\n";
*/
- std::ostringstream geometry;
+ ostringstream geometry;
geometry
<< "; SPIR-V\n"
<< "; Version: 1.3\n"
"%35 = OpConstant %25 1\n"
"%36 = OpTypePointer Input %6\n"
"%39 = OpTypePointer Output %6\n" : "")
+ << "%99 = OpConstant %10 1024\n"
<< "%4 = OpFunction %2 None %3\n"
<< "%5 = OpLabel\n"
<< "%13 = OpLoad %10 %12\n"
<< "%14 = OpConvertUToF %6 %13\n"
- << "%16 = OpLoad %10 %15\n"
+ << "%98 = OpLoad %10 %15\n"
+ << "%16 = OpIAdd %10 %98 %99\n"
<< "%17 = OpConvertUToF %6 %16\n"
<< "%19 = OpCompositeConstruct %7 %14 %17 %18 %18\n"
<< "OpStore %9 %19\n"
}
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+vector<string> getPerStageHeadDeclarations (const CaseDefinition& caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ vector<string> result (stageCount, string());
+
+ for (size_t i = 0; i < result.size(); ++i)
+ {
+ result[i] =
+ "layout(set = 0, binding = " + de::toString(i) + ", std430) buffer Buffer1\n"
+ "{\n"
+ " uvec4 result[];\n"
+ "};\n";
+ }
+
+ return result;
+}
+
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ if (isAllComputeStages(caseDef.shaderStage))
{
- std::ostringstream src;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+
+ ostringstream src;
src << "#version 450\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< " result[offset] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID, gl_NumSubgroups, gl_SubgroupID);\n"
<< "}\n";
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ programCollection.glslSources.add("comp") << glu::ComputeSource(src.str()) << buildOptions;
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const SpirVAsmBuildOptions buildOptionsSpr (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
+
{
/*
"#version 450\n"
"\n"
"void main (void)\n"
"{\n"
- " result[gl_VertexIndex] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID, 0, 0);\n"
+ " result[gl_VertexIndex] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0, 0);\n"
" float pixelSize = 2.0f/1024.0f;\n"
" float pixelPosition = pixelSize/2.0f - 1.0f;\n"
" gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
"%48 = OpConstant %26 0\n"
"%50 = OpTypePointer Output %36\n"
"%52 = OpConstant %12 1\n"
+ "%99 = OpConstant %6 1024\n"
"%53 = OpTypePointer Output %26\n"
"%4 = OpFunction %2 None %3\n"
"%5 = OpLabel\n"
"%30 = OpVariable %27 Function\n"
"%16 = OpLoad %12 %15\n"
"%19 = OpLoad %6 %18\n"
- "%21 = OpLoad %6 %20\n"
+ "%98 = OpLoad %6 %20\n"
+ "%21 = OpIAdd %6 %98 %99\n"
"%23 = OpCompositeConstruct %7 %19 %21 %22 %22\n"
"%25 = OpAccessChain %24 %11 %13 %16\n"
"OpStore %25 %23\n"
"OpStore %54 %34\n"
"OpReturn\n"
"OpFunctionEnd\n";
- programCollection.spirvAsmSources.add("vert") << vertex << SpirVAsmBuildOptions(programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
+ programCollection.spirvAsmSources.add("vert") << vertex << buildOptionsSpr;
}
{
"\n"
"void main (void)\n"
"{\n"
- " result[gl_PrimitiveID] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID, 0, 0);\n"
+ " result[gl_PrimitiveID] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0, 0);\n"
" if (gl_InvocationID == 0)\n"
" {\n"
" gl_TessLevelOuter[0] = 1.0f;\n"
"%61 = OpTypePointer Input %32\n"
"%62 = OpTypePointer Output %32\n"
"%63 = OpConstant %12 1\n" : "") +
+ "%99 = OpConstant %6 1024\n"
"%4 = OpFunction %2 None %3\n"
"%5 = OpLabel\n"
"%16 = OpLoad %12 %15\n"
"%19 = OpLoad %6 %18\n"
- "%21 = OpLoad %6 %20\n"
+ "%98 = OpLoad %6 %20\n"
+ "%21 = OpIAdd %6 %98 %99\n"
"%23 = OpCompositeConstruct %7 %19 %21 %22 %22\n"
"%25 = OpAccessChain %24 %11 %13 %16\n"
"OpStore %25 %23\n"
"OpStore %66 %65\n" : "") +
"OpReturn\n"
"OpFunctionEnd\n";
- programCollection.spirvAsmSources.add("tesc") << tesc << SpirVAsmBuildOptions(programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
+ programCollection.spirvAsmSources.add("tesc") << tesc << buildOptionsSpr;
}
{
"\n"
"void main (void)\n"
"{\n"
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID, 0, 0);\n"
+ " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0, 0);\n"
" float pixelSize = 2.0f/1024.0f;\n"
" gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
#if GEOMETRY_POINT_SIZE_SUPPORTED
"%67 = OpTypePointer Input %20\n"
"%68 = OpTypePointer Output %20\n"
"%69 = OpConstant %12 1\n" : "") +
+ "%99 = OpConstant %6 1024\n"
"%4 = OpFunction %2 None %3\n"
"%5 = OpLabel\n"
"%41 = OpVariable %40 Function\n"
"%30 = OpConvertFToU %6 %29\n"
"%31 = OpIAdd %6 %19 %30\n"
"%34 = OpLoad %6 %33\n"
- "%36 = OpLoad %6 %35\n"
+ "%98 = OpLoad %6 %35\n"
+ "%36 = OpIAdd %6 %98 %99\n"
"%37 = OpCompositeConstruct %7 %34 %36 %24 %24\n"
"%39 = OpAccessChain %38 %11 %13 %31\n"
"OpStore %39 %37\n"
"OpStore %72 %71\n" : "") +
"OpReturn\n"
"OpFunctionEnd\n";
- programCollection.spirvAsmSources.add("tese") << tese << SpirVAsmBuildOptions(programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
+ programCollection.spirvAsmSources.add("tese") << tese << buildOptionsSpr;
}
{
"\n"
"void main (void)\n"
"{\n"
- " result[gl_PrimitiveIDIn] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID, 0, 0);\n"
+ " result[gl_PrimitiveIDIn] = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0, 0);\n"
" gl_Position = gl_in[0].gl_Position;\n"
#if GEOMETRY_POINT_SIZE_SUPPORTED
" gl_PointSize = gl_in[0].gl_PointSize;\n"
"%42 = OpTypePointer Input %26\n"
"%43 = OpTypePointer Output %26\n"
"%44 = OpConstant %12 1\n" : "") +
+ "%99 = OpConstant %6 1024\n"
"%4 = OpFunction %2 None %3\n"
"%5 = OpLabel\n"
"%16 = OpLoad %12 %15\n"
"%19 = OpLoad %6 %18\n"
- "%21 = OpLoad %6 %20\n"
+ "%98 = OpLoad %6 %20\n"
+ "%21 = OpIAdd %6 %98 %99\n"
"%23 = OpCompositeConstruct %7 %19 %21 %22 %22\n"
"%25 = OpAccessChain %24 %11 %13 %16\n"
"OpStore %25 %23\n"
"OpReturn\n"
"OpFunctionEnd\n";
- addGeometryShadersFromTemplate(geometry, SpirVAsmBuildOptions(programCollection.usedVulkanVersion, SPIRV_VERSION_1_3), programCollection.spirvAsmSources);
+ addGeometryShadersFromTemplate(geometry, buildOptionsSpr, programCollection.spirvAsmSources);
}
{
"layout(location = 0) out uvec4 data;\n"
"void main (void)\n"
"{\n"
- " data = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID, 0, 0);\n"
+ " data = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0, 0);\n"
"}\n";
*/
const string fragment =
"%11 = OpVariable %10 Input\n"
"%13 = OpVariable %10 Input\n"
"%15 = OpConstant %6 0\n"
+ "%99 = OpConstant %6 1024\n"
"%4 = OpFunction %2 None %3\n"
"%5 = OpLabel\n"
"%12 = OpLoad %6 %11\n"
- "%14 = OpLoad %6 %13\n"
+ "%98 = OpLoad %6 %13\n"
+ "%14 = OpIAdd %6 %98 %99\n"
"%16 = OpCompositeConstruct %7 %12 %14 %15 %15\n"
"OpStore %9 %16\n"
"OpReturn\n"
"OpFunctionEnd\n";
- programCollection.spirvAsmSources.add("fragment") << fragment << SpirVAsmBuildOptions(programCollection.usedVulkanVersion, SPIRV_VERSION_1_3);
+ programCollection.spirvAsmSources.add("fragment") << fragment << buildOptionsSpr;
}
subgroups::addNoSubgroupShader(programCollection);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_4, 0u, true);
+ const string extHeader = "#extension GL_KHR_shader_subgroup_basic : require\n";
+ const string tempRes = " uvec4 tempRes;\n";
+ const string testSrc = " tempRes = uvec4(gl_SubgroupSize, gl_SubgroupInvocationID + 1024, 0, 0);\n";
+ const vector<string> headDeclarations = getPerStageHeadDeclarations(caseDef);
+
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32G32B32A32_UINT, false, extHeader, testSrc, "", headDeclarations, false, tempRes);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage");
}
void supportedCheck (Context& context, CaseDefinition caseDef)
{
- DE_UNREF(caseDef);
if (!subgroups::isSubgroupSupported(context))
TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
- {
- if (areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " + getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
-
if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
{
- if ("gl_SubgroupSize" == caseDef.varName)
- {
- return makeVertexFrameBufferTest(
- context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize);
- }
- else if ("gl_SubgroupInvocationID" == caseDef.varName)
+ switch (caseDef.testType)
{
- return makeVertexFrameBufferTest(
- context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID);
- }
- else
- {
- return tcu::TestStatus::fail(
- caseDef.varName + " failed (unhandled error checking case " +
- caseDef.varName + ")!");
+ case TEST_TYPE_SUBGROUP_SIZE: return makeVertexFrameBufferTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize);
+ case TEST_TYPE_SUBGROUP_INVOCATION_ID: return makeVertexFrameBufferTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID);
+ default: TCU_THROW(InternalError, "Unknown builtin");
}
}
- else if ((VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) & caseDef.shaderStage )
+ else if ((VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT | VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) & caseDef.shaderStage)
{
- if ("gl_SubgroupSize" == caseDef.varName)
- {
- return makeTessellationEvaluationFrameBufferTest(
- context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize);
- }
- else if ("gl_SubgroupInvocationID" == caseDef.varName)
+ switch (caseDef.testType)
{
- return makeTessellationEvaluationFrameBufferTest(
- context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID);
- }
- else
- {
- return tcu::TestStatus::fail(
- caseDef.varName + " failed (unhandled error checking case " +
- caseDef.varName + ")!");
+ case TEST_TYPE_SUBGROUP_SIZE: return makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize);
+ case TEST_TYPE_SUBGROUP_INVOCATION_ID: return makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID);
+ default: TCU_THROW(InternalError, "Unknown builtin");
}
}
- else if (VK_SHADER_STAGE_GEOMETRY_BIT & caseDef.shaderStage )
+ else if (VK_SHADER_STAGE_GEOMETRY_BIT & caseDef.shaderStage)
{
- if ("gl_SubgroupSize" == caseDef.varName)
- {
- return makeGeometryFrameBufferTest(
- context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize);
- }
- else if ("gl_SubgroupInvocationID" == caseDef.varName)
+ switch (caseDef.testType)
{
- return makeGeometryFrameBufferTest(
- context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID);
- }
- else
- {
- return tcu::TestStatus::fail(
- caseDef.varName + " failed (unhandled error checking case " +
- caseDef.varName + ")!");
+ case TEST_TYPE_SUBGROUP_SIZE: return makeGeometryFrameBufferTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize);
+ case TEST_TYPE_SUBGROUP_INVOCATION_ID: return makeGeometryFrameBufferTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID);
+ default: TCU_THROW(InternalError, "Unknown builtin");
}
}
else
}
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
{
- if (!areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " + getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
- if ("gl_SubgroupSize" == caseDef.varName)
+ switch (caseDef.testType)
{
- if (caseDef.requiredSubgroupSize == DE_FALSE)
- return makeComputeTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupSize);
+ case TEST_TYPE_SUBGROUP_SIZE:
+ {
+ if (caseDef.requiredSubgroupSize == DE_FALSE)
+ return makeComputeTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupSize);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
+ // According to the spec, requiredSubgroupSize must be a power-of-two integer.
+ for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
+ {
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupSize,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ if (result.getCode() != QP_TEST_RESULT_PASS)
+ {
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
- // According to the spec, requiredSubgroupSize must be a power-of-two integer.
- for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
- {
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupSize,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
- if (result.getCode() != QP_TEST_RESULT_PASS)
- {
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
- return result;
+ return result;
+ }
}
+
+ return TestStatus::pass("OK");
}
- return tcu::TestStatus::pass("OK");
- }
- else if ("gl_SubgroupInvocationID" == caseDef.varName)
- {
- if (caseDef.requiredSubgroupSize == DE_FALSE)
- return makeComputeTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupInvocationID);
+ case TEST_TYPE_SUBGROUP_INVOCATION_ID:
+ {
+ if (caseDef.requiredSubgroupSize == DE_FALSE)
+ return makeComputeTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupInvocationID);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
+ // According to the spec, requiredSubgroupSize must be a power-of-two integer.
+ for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
+ {
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupInvocationID,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ if (result.getCode() != QP_TEST_RESULT_PASS)
+ {
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
- // According to the spec, requiredSubgroupSize must be a power-of-two integer.
- for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
- {
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupInvocationID,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
- if (result.getCode() != QP_TEST_RESULT_PASS)
- {
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
- return result;
+ return result;
+ }
}
+
+ return TestStatus::pass("OK");
}
- return tcu::TestStatus::pass("OK");
- }
- else if ("gl_NumSubgroups" == caseDef.varName)
- {
- if (caseDef.requiredSubgroupSize == DE_FALSE)
- return makeComputeTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkComputeNumSubgroups);
+ case TEST_TYPE_SUBGROUP_NUM_SUBGROUPS:
+ {
+ if (caseDef.requiredSubgroupSize == DE_FALSE)
+ return makeComputeTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkComputeNumSubgroups);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
+ // According to the spec, requiredSubgroupSize must be a power-of-two integer.
+ for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
+ {
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeNumSubgroups,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ if (result.getCode() != QP_TEST_RESULT_PASS)
+ {
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
- // According to the spec, requiredSubgroupSize must be a power-of-two integer.
- for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
- {
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeNumSubgroups,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
- if (result.getCode() != QP_TEST_RESULT_PASS)
- {
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
- return result;
+ return result;
+ }
}
+
+ return TestStatus::pass("OK");
}
- return tcu::TestStatus::pass("OK");
- }
- else if ("gl_SubgroupID" == caseDef.varName)
- {
- if (caseDef.requiredSubgroupSize == DE_FALSE)
- return makeComputeTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupID);
+ case TEST_TYPE_SUBGROUP_NUM_SUBGROUP_ID:
+ {
+ if (caseDef.requiredSubgroupSize == DE_FALSE)
+ return makeComputeTest(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupID);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
+ // According to the spec, requiredSubgroupSize must be a power-of-two integer.
+ for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
+ {
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupID,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ if (result.getCode() != QP_TEST_RESULT_PASS)
+ {
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
- // According to the spec, requiredSubgroupSize must be a power-of-two integer.
- for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
- {
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkComputeSubgroupID,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
- if (result.getCode() != QP_TEST_RESULT_PASS)
- {
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
- return result;
+ return result;
+ }
}
+
+ return TestStatus::pass("OK");
}
- return tcu::TestStatus::pass("OK");
- }
- else
- {
- return tcu::TestStatus::fail(
- caseDef.varName + " failed (unhandled error checking case " +
- caseDef.varName + ")!");
+ default:
+ TCU_THROW(InternalError, "Unknown builtin");
}
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ switch (caseDef.testType)
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
+ case TEST_TYPE_SUBGROUP_SIZE: return subgroups::allStages(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize, stages);
+ case TEST_TYPE_SUBGROUP_INVOCATION_ID: return subgroups::allStages(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID, stages);
+ default: TCU_THROW(InternalError, "Unknown builtin");
}
+ }
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- if ("gl_SubgroupSize" == caseDef.varName)
- {
- return subgroups::allStages(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize, stages);
- }
- else if ("gl_SubgroupInvocationID" == caseDef.varName)
- {
- return subgroups::allStages(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID, stages);
- }
- else
+ switch (caseDef.testType)
{
- return tcu::TestStatus::fail(
- caseDef.varName + " failed (unhandled error checking case " +
- caseDef.varName + ")!");
+ case TEST_TYPE_SUBGROUP_SIZE: return subgroups::allRayTracingStages(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupSize, stages);
+ case TEST_TYPE_SUBGROUP_INVOCATION_ID: return subgroups::allRayTracingStages(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStagesSubgroupInvocationID, stages);
+ default: TCU_THROW(InternalError, "Unknown builtin");
}
}
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
-tcu::TestCaseGroup* createSubgroupsBuiltinVarTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsBuiltinVarTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup builtin variable tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup builtin variable tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup builtin variable tests: framebuffer"));
-
- const char* const all_stages_vars[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "builtin_var", "Subgroup builtin variable tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup builtin variable tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup builtin variable tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup builtin variable tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup builtin variable tests: ray tracing"));
+ const TestType allStagesBuiltinVars[] =
{
- "SubgroupSize",
- "SubgroupInvocationID"
+ TEST_TYPE_SUBGROUP_SIZE,
+ TEST_TYPE_SUBGROUP_INVOCATION_ID,
};
-
- const char* const compute_only_vars[] =
+ const TestType computeOnlyBuiltinVars[] =
{
- "NumSubgroups",
- "SubgroupID"
+ TEST_TYPE_SUBGROUP_NUM_SUBGROUPS,
+ TEST_TYPE_SUBGROUP_NUM_SUBGROUP_ID,
};
-
- const VkShaderStageFlags stages[] =
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
- for (int a = 0; a < DE_LENGTH_OF_ARRAY(all_stages_vars); ++a)
+ for (int a = 0; a < DE_LENGTH_OF_ARRAY(allStagesBuiltinVars); ++a)
{
- const std::string var = all_stages_vars[a];
- const std::string varLower = de::toLower(var);
+ const TestType testType = allStagesBuiltinVars[a];
+ const string varLower = de::toLower(getTestName(testType));
{
- const CaseDefinition caseDef = { "gl_" + var, VK_SHADER_STAGE_ALL_GRAPHICS, de::SharedPtr<bool>(new bool), DE_FALSE};
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
- addFunctionCaseWithPrograms(graphicGroup.get(),
- varLower, "",
- supportedCheck, initPrograms, test, caseDef);
+ addFunctionCaseWithPrograms(graphicGroup.get(), varLower, "", supportedCheck, initPrograms, test, caseDef);
}
{
- CaseDefinition caseDef = {"gl_" + var, VK_SHADER_STAGE_COMPUTE_BIT, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(),
- varLower + "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(),
- varLower + "_" + getShaderStageName(caseDef.shaderStage) + "_requiredsubgroupsize", "",
- supportedCheck, initPrograms, test, caseDef);
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), varLower, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
+ {
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testNameSuffix = requiredSubgroupSize ? "_requiredsubgroupsize" : "";
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize // deBool requiredSubgroupSize;
+ };
+ const string testName = varLower + "_" + getShaderStageName(caseDef.shaderStage) + testNameSuffix;
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
}
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
- const CaseDefinition caseDef = {"gl_" + var, stages[stageIndex], de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(),
- varLower + "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = varLower + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
}
}
- for (int a = 0; a < DE_LENGTH_OF_ARRAY(compute_only_vars); ++a)
+ for (int a = 0; a < DE_LENGTH_OF_ARRAY(computeOnlyBuiltinVars); ++a)
{
- const std::string var = compute_only_vars[a];
+ const TestType testType = computeOnlyBuiltinVars[a];
+ const string varLower = de::toLower(getTestName(testType));
- CaseDefinition caseDef = {"gl_" + var, VK_SHADER_STAGE_COMPUTE_BIT, de::SharedPtr<bool>(new bool), DE_FALSE};
-
- addFunctionCaseWithPrograms(computeGroup.get(), de::toLower(var), "",
- supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), de::toLower(var) + "_requiredsubgroupsize", "",
- supportedCheck, initPrograms, test, caseDef);
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
+ {
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testNameSuffix = requiredSubgroupSize ? "_requiredsubgroupsize" : "";
+ const CaseDefinition caseDef =
+ {
+ testType, // TestType testType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize // deBool requiredSubgroupSize;
+ };
+ const string testName = varLower + testNameSuffix;
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "builtin_var", "Subgroup builtin variable tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
+ group->addChild(raytracingGroup.release());
group->addChild(framebufferGroup.release());
return group.release();
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsBuiltinVarTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsBuiltinVarTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
OPTYPE_CLUSTERED_LAST
};
-static Operator getOperator(OpType t)
+struct CaseDefinition
+{
+ Operator op;
+ VkShaderStageFlags shaderStage;
+ VkFormat format;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool requiredSubgroupSize;
+};
+
+static Operator getOperator (OpType opType)
{
- switch (t)
+ switch (opType)
{
case OPTYPE_CLUSTERED_ADD: return OPERATOR_ADD;
case OPTYPE_CLUSTERED_MUL: return OPERATOR_MUL;
case OPTYPE_CLUSTERED_AND: return OPERATOR_AND;
case OPTYPE_CLUSTERED_OR: return OPERATOR_OR;
case OPTYPE_CLUSTERED_XOR: return OPERATOR_XOR;
- default:
- DE_FATAL("Unsupported op type");
- return OPERATOR_ADD;
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 1);
+
+ return subgroups::check(datas, width, 1);
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
}
-std::string getOpTypeName(Operator op)
+string getOpTypeName (Operator op)
{
return getScanOpName("subgroupClustered", "", op, SCAN_REDUCE);
}
-struct CaseDefinition
-{
- Operator op;
- VkShaderStageFlags shaderStage;
- VkFormat format;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
-};
-
-std::string getExtHeader(CaseDefinition caseDef)
+string getExtHeader (CaseDefinition& caseDef)
{
return "#extension GL_KHR_shader_subgroup_clustered: enable\n"
"#extension GL_KHR_shader_subgroup_ballot: enable\n" +
subgroups::getAdditionalExtensionForFormat(caseDef.format);
}
-std::string getTestSrc(CaseDefinition caseDef)
+string getTestSrc (CaseDefinition& caseDef)
{
- std::ostringstream bdy;
+ const string formatName = subgroups::getFormatNameForGLSL(caseDef.format);
+ const string opTypeName = getOpTypeName(caseDef.op);
+ const string identity = getIdentity(caseDef.op, caseDef.format);
+ const string opOperation = getOpOperation(caseDef.op, caseDef.format, "ref", "data[index]");
+ const string compare = getCompare(caseDef.op, caseDef.format, "ref", "op");
+ ostringstream bdy;
+
bdy << " bool tempResult = true;\n"
<< " uvec4 mask = subgroupBallot(true);\n";
<< " const uint clusterSize = " << i << ";\n"
<< " if (clusterSize <= gl_SubgroupSize)\n"
<< " {\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " op = "
- << getOpTypeName(caseDef.op) + "(data[gl_SubgroupInvocationID], clusterSize);\n"
+ << " " << formatName << " op = "
+ << opTypeName + "(data[gl_SubgroupInvocationID], clusterSize);\n"
<< " for (uint clusterOffset = 0; clusterOffset < gl_SubgroupSize; clusterOffset += clusterSize)\n"
<< " {\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " ref = "
- << getIdentity(caseDef.op, caseDef.format) << ";\n"
+ << " " << formatName << " ref = "
+ << identity << ";\n"
<< " for (uint index = clusterOffset; index < (clusterOffset + clusterSize); index++)\n"
<< " {\n"
<< " if (subgroupBallotBitExtract(mask, index))\n"
<< " {\n"
- << " ref = " << getOpOperation(caseDef.op, caseDef.format, "ref", "data[index]") << ";\n"
+ << " ref = " << opOperation << ";\n"
<< " }\n"
<< " }\n"
<< " if ((clusterOffset <= gl_SubgroupInvocationID) && (gl_SubgroupInvocationID < (clusterOffset + clusterSize)))\n"
<< " {\n"
- << " if (!" << getCompare(caseDef.op, caseDef.format, "ref", "op") << ")\n"
+ << " if (!" << compare << ")\n"
<< " {\n"
<< " tempResult = false;\n"
<< " }\n"
<< " }\n"
<< " tempRes = tempResult ? 1 : 0;\n";
}
+
return bdy.str();
}
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
-
- std::string extHeader = getExtHeader(caseDef);
- std::string testSrc = getTestSrc(caseDef);
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSrc(caseDef);
subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
-
- std::string extHeader = getExtHeader(caseDef);
- std::string testSrc = getTestSrc(caseDef);
+ const bool spirv14required = isAllRayTracingStages(caseDef.shaderStage);
+ const SpirvVersion spirvVersion = spirv14required ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSrc(caseDef);
subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
}
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const subgroups::SSBOData inputData =
{
- if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd140;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+
subgroups::SSBOData inputData;
inputData.format = caseDef.format;
inputData.layout = subgroups::SSBOData::LayoutStd430;
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
- inputData.binding = 4u;
- inputData.stages = stages;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 4u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 6u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsClusteredTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsClusteredTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup clustered category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup clustered category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup clustered category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "clustered", "Subgroup clustered category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup clustered category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup clustered category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup clustered category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup clustered category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
- const std::vector<VkFormat> formats = subgroups::getAllFormats();
-
- for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- const VkFormat format = formats[formatIndex];
+ const vector<VkFormat> formats = subgroups::getAllFormats();
- for (int opTypeIndex = 0; opTypeIndex < OPTYPE_CLUSTERED_LAST; ++opTypeIndex)
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- bool isBool = subgroups::isFormatBool(format);
- bool isFloat = subgroups::isFormatFloat(format);
-
- OpType opType = static_cast<OpType>(opTypeIndex);
- Operator op = getOperator(opType);
-
- bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
-
- // Skip float with bitwise category.
- if (isFloat && isBitwiseOp)
- continue;
-
- // Skip bool when its not the bitwise category.
- if (isBool && !isBitwiseOp)
- continue;
-
- const std::string name = de::toLower(getOpTypeName(op))
- +"_" + subgroups::getFormatNameForGLSL(format);
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ const bool isBool = subgroups::isFormatBool(format);
+ const bool isFloat = subgroups::isFormatFloat(format);
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_CLUSTERED_LAST; ++opTypeIndex)
{
- CaseDefinition caseDef = {op, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), name + "_requiredsubgroupsize", "", supportedCheck, initPrograms, test, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const Operator op = getOperator(opType);
+ const bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
+
+ // Skip float with bitwise category.
+ if (isFloat && isBitwiseOp)
+ continue;
+
+				// Skip bool when it's not the bitwise category.
+ if (isBool && !isBitwiseOp)
+ continue;
+
+ const string name = de::toLower(getOpTypeName(op)) +"_" + formatName;
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
+ {
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize, // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ {
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ {
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = name +"_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ }
}
+ }
+ }
- {
- const CaseDefinition caseDef = {op, VK_SHADER_STAGE_ALL_GRAPHICS, format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(graphicGroup.get(), name,
- "", supportedCheck, initPrograms, test, caseDef);
- }
+ {
+ const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();
+
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
+ {
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ const bool isBool = subgroups::isFormatBool(format);
+ const bool isFloat = subgroups::isFormatFloat(format);
- for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_CLUSTERED_LAST; ++opTypeIndex)
{
- const CaseDefinition caseDef = {op, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(), name +"_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const Operator op = getOperator(opType);
+ const bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
+
+ // Skip float with bitwise category.
+ if (isFloat && isBitwiseOp)
+ continue;
+
+				// Skip bool when it's not the bitwise category.
+ if (isBool && !isBitwiseOp)
+ continue;
+
+ {
+ const string name = de::toLower(getOpTypeName(op)) +"_" + formatName;
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
}
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "clustered", "Subgroup clustered category tests"));
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsClusteredTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsClusteredTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
OPTYPE_LAST
};
-static Operator getOperator(OpType t)
+struct CaseDefinition
{
- switch (t)
+ Operator op;
+ ScanType scanType;
+ VkShaderStageFlags shaderStage;
+ VkFormat format;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool requiredSubgroupSize;
+};
+
+static Operator getOperator (OpType opType)
+{
+ switch (opType)
{
case OPTYPE_ADD:
case OPTYPE_INCLUSIVE_ADD:
}
}
-static ScanType getScanType(OpType t)
+static ScanType getScanType (OpType opType)
{
- switch (t)
+ switch (opType)
{
case OPTYPE_ADD:
case OPTYPE_MUL:
}
}
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 0xFFFFFF);
+
+ return subgroups::check(datas, width, 0xFFFFFF);
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 0xFFFFFF);
-}
-std::string getOpTypeName(Operator op, ScanType scanType)
-{
- return getScanOpName("subgroup", "", op, scanType);
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 0xFFFFFF);
}
-std::string getOpTypeNamePartitioned(Operator op, ScanType scanType)
+string getOpTypeName (Operator op, ScanType scanType)
{
- return getScanOpName("subgroupPartitioned", "NV", op, scanType);
+ return getScanOpName("subgroup", "", op, scanType);
}
-struct CaseDefinition
+string getOpTypeNamePartitioned (Operator op, ScanType scanType)
{
- Operator op;
- ScanType scanType;
- VkShaderStageFlags shaderStage;
- VkFormat format;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
-};
+ return getScanOpName("subgroupPartitioned", "NV", op, scanType);
+}
-std::string getExtHeader(CaseDefinition caseDef)
+string getExtHeader (const CaseDefinition& caseDef)
{
return "#extension GL_NV_shader_subgroup_partitioned: enable\n"
"#extension GL_KHR_shader_subgroup_arithmetic: enable\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n" +
- subgroups::getAdditionalExtensionForFormat(caseDef.format);
+ "#extension GL_KHR_shader_subgroup_ballot: enable\n"
+ + subgroups::getAdditionalExtensionForFormat(caseDef.format);
}
-string getTestString(const CaseDefinition &caseDef)
+string getTestString (const CaseDefinition& caseDef)
{
Operator op = caseDef.op;
ScanType st = caseDef.scanType;
- // NOTE: tempResult can't have anything in bits 31:24 to avoid int->float
- // conversion overflow in framebuffer tests.
- string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
+ // NOTE: tempResult can't have anything in bits 31:24 to avoid int->float
+ // conversion overflow in framebuffer tests.
+ string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
string bdy =
+ " uvec4 mask = subgroupBallot(true);\n"
" uint tempResult = 0;\n"
" uint id = gl_SubgroupInvocationID;\n";
- // Test the case where the partition has a single subset with all invocations in it.
- // This should generate the same result as the non-partitioned function.
- bdy +=
- " uvec4 allBallot = mask;\n"
- " " + fmt + " allResult = " + getOpTypeNamePartitioned(op, st) + "(data[gl_SubgroupInvocationID], allBallot);\n"
- " " + fmt + " refResult = " + getOpTypeName(op, st) + "(data[gl_SubgroupInvocationID]);\n"
- " if (" + getCompare(op, caseDef.format, "allResult", "refResult") + ") {\n"
- " tempResult |= 0x1;\n"
- " }\n";
+ // Test the case where the partition has a single subset with all invocations in it.
+ // This should generate the same result as the non-partitioned function.
+ bdy +=
+ " uvec4 allBallot = mask;\n"
+ " " + fmt + " allResult = " + getOpTypeNamePartitioned(op, st) + "(data[gl_SubgroupInvocationID], allBallot);\n"
+ " " + fmt + " refResult = " + getOpTypeName(op, st) + "(data[gl_SubgroupInvocationID]);\n"
+ " if (" + getCompare(op, caseDef.format, "allResult", "refResult") + ") {\n"
+ " tempResult |= 0x1;\n"
+ " }\n";
// The definition of a partition doesn't forbid bits corresponding to inactive
// invocations being in the subset with active invocations. In other words, test that
" } else {\n"
" tempResult |= 0xFC0000;\n"
" }\n"
+ " tempRes = tempResult;\n"
;
return bdy;
}
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- std::ostringstream bdy;
-
- std::string extHeader = getExtHeader(caseDef);
-
- subgroups::setFragmentShaderFrameBuffer(programCollection);
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestString(caseDef);
+ const bool pointSizeSupport = *caseDef.geometryPointSizeSupported;
- if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage)
- subgroups::setVertexShaderFrameBuffer(programCollection);
-
- bdy << getTestString(caseDef);
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- {
- std::ostringstream vertexSrc;
- vertexSrc << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extHeader.c_str()
- << "layout(location = 0) in highp vec4 in_position;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec4 mask = subgroupBallot(true);\n"
- << bdy.str()
- << " out_color = float(tempResult);\n"
- << " gl_Position = in_position;\n"
- << " gl_PointSize = 1.0f;\n"
- << "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertexSrc.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- {
- std::ostringstream geometry;
-
- geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extHeader.c_str()
- << "layout(points) in;\n"
- << "layout(points, max_vertices = 1) out;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec4 mask = subgroupBallot(true);\n"
- << bdy.str()
- << " out_color = float(tempResult);\n"
- << " gl_Position = gl_in[0].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << " EmitVertex();\n"
- << " EndPrimitive();\n"
- << "}\n";
-
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- {
- std::ostringstream controlSource;
- controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extHeader.c_str()
- << "layout(vertices = 2) out;\n"
- << "layout(location = 0) out float out_color[];\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " if (gl_InvocationID == 0)\n"
- <<" {\n"
- << " gl_TessLevelOuter[0] = 1.0f;\n"
- << " gl_TessLevelOuter[1] = 1.0f;\n"
- << " }\n"
- << " uvec4 mask = subgroupBallot(true);\n"
- << bdy.str()
- << " out_color[gl_InvocationID] = float(tempResult);"
- << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "")
- << "}\n";
-
-
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
- subgroups::setTesEvalShaderFrameBuffer(programCollection);
- }
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- {
-
- std::ostringstream evaluationSource;
- evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extHeader.c_str()
- << "layout(isolines, equal_spacing, ccw ) in;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec4 mask = subgroupBallot(true);\n"
- << bdy.str()
- << " out_color = float(tempResult);\n"
- << " gl_Position = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << "}\n";
-
- subgroups::setTesCtrlShaderFrameBuffer(programCollection);
- programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
- }
- else
- {
- DE_FATAL("Unsupported shader stage");
- }
+ subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, pointSizeSupport, extHeader, testSrc, "");
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const string extHeader = getExtHeader(caseDef);
- const string bdy = getTestString(caseDef);
-
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- std::ostringstream src;
-
- src << "#version 450\n"
- << extHeader.c_str()
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "layout(set = 0, binding = 1, std430) buffer Buffer2\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << " uvec4 mask = subgroupBallot(true);\n"
- << bdy
- << " result[offset] = tempResult;\n"
- << "}\n";
-
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else
- {
- {
- const std::string vertex =
- "#version 450\n"
- + extHeader +
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- " uvec4 mask = subgroupBallot(true);\n"
- + bdy+
- " result[gl_VertexIndex] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const std::string tesc =
- "#version 450\n"
- + extHeader +
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- " uvec4 mask = subgroupBallot(true);\n"
- + bdy +
- " result[gl_PrimitiveID] = tempResult;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- "}\n";
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const std::string tese =
- "#version 450\n"
- + extHeader +
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- " uvec4 mask = subgroupBallot(true);\n"
- + bdy +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- "}\n";
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const std::string geometry =
- "#version 450\n"
- + extHeader +
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- " uvec4 mask = subgroupBallot(true);\n"
- + bdy +
- " result[gl_PrimitiveIDIn] = tempResult;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- }
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestString(caseDef);
+ const bool pointSizeSupport = false;
- {
- const std::string fragment =
- "#version 450\n"
- + extHeader +
- "layout(location = 0) out uint result;\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " data[];\n"
- "};\n"
- "void main (void)\n"
- "{\n"
- " uvec4 mask = subgroupBallot(true);\n"
- + bdy +
- " result = tempResult;\n"
- "}\n";
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- subgroups::addNoSubgroupShader(programCollection);
- }
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, pointSizeSupport, extHeader, testSrc, "");
}
void supportedCheck (Context& context, CaseDefinition caseDef)
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
-
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const subgroups::SSBOData inputData
{
- if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd140;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+ const subgroups::SSBOData inputData =
{
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
- inputData.binding = 4u;
- inputData.stages = stages;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 4u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 6u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsPartitionedTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsPartitionedTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup partitioned category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup partitioned category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup partitioned category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "partitioned", "Subgroup partitioned category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup partitioned category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup partitioned category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup partitioned category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup partitioned category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
- const std::vector<VkFormat> formats = subgroups::getAllFormats();
-
- for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- const VkFormat format = formats[formatIndex];
+ const vector<VkFormat> formats = subgroups::getAllFormats();
- for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- bool isBool = subgroups::isFormatBool(format);
- bool isFloat = subgroups::isFormatFloat(format);
-
- OpType opType = static_cast<OpType>(opTypeIndex);
- Operator op = getOperator(opType);
- ScanType st = getScanType(opType);
-
- bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
-
- // Skip float with bitwise category.
- if (isFloat && isBitwiseOp)
- continue;
-
- // Skip bool when its not the bitwise category.
- if (isBool && !isBitwiseOp)
- continue;
-
- const std::string name = de::toLower(getOpTypeName(op, st)) + "_" + subgroups::getFormatNameForGLSL(format);
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ const bool isBool = subgroups::isFormatBool(format);
+ const bool isFloat = subgroups::isFormatFloat(format);
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- CaseDefinition caseDef = {op, st, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), name,
- "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), name + "_requiredsubgroupsize",
- "", supportedCheck, initPrograms, test, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const Operator op = getOperator(opType);
+ const ScanType st = getScanType(opType);
+ const bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
+
+ // Skip float with bitwise category.
+ if (isFloat && isBitwiseOp)
+ continue;
+
+				// Skip bool when it's not the bitwise category.
+ if (isBool && !isBitwiseOp)
+ continue;
+
+ const string name = de::toLower(getOpTypeName(op, st)) + "_" + formatName;
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
+ {
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ st, // ScanType scanType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ {
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ st, // ScanType scanType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ {
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ st, // ScanType scanType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ }
}
+ }
+ }
- {
- const CaseDefinition caseDef = {op, st, VK_SHADER_STAGE_ALL_GRAPHICS, format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(graphicGroup.get(), name,
- "", supportedCheck, initPrograms, test, caseDef);
- }
+ {
+ const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();
- for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
+ {
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ const bool isBool = subgroups::isFormatBool(format);
+ const bool isFloat = subgroups::isFormatFloat(format);
+
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- const CaseDefinition caseDef = {op, st, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(), name +
- "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const Operator op = getOperator(opType);
+ const ScanType st = getScanType(opType);
+ const bool isBitwiseOp = (op == OPERATOR_AND || op == OPERATOR_OR || op == OPERATOR_XOR);
+
+ // Skip float with bitwise category.
+ if (isFloat && isBitwiseOp)
+ continue;
+
+				// Skip bool when it's not the bitwise category.
+ if (isBool && !isBitwiseOp)
+ continue;
+
+ {
+ const CaseDefinition caseDef =
+ {
+ op, // Operator op;
+ st, // ScanType scanType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string name = de::toLower(getOpTypeName(op, st)) + "_" + formatName;
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
}
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "partitioned", "Subgroup partitioned category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsPartitionedTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsPartitionedTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
OPTYPE_LAST
};
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+struct CaseDefinition
+{
+ OpType opType;
+ VkShaderStageFlags shaderStage;
+ VkFormat format;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool requiredSubgroupSize;
+};
+
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 1);
+
+ return subgroups::check(datas, width, 1);
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
}
-std::string getOpTypeName(int opType)
+string getOpTypeName (OpType opType)
{
switch (opType)
{
- default:
- DE_FATAL("Unsupported op type");
- return "";
- case OPTYPE_QUAD_BROADCAST:
- case OPTYPE_QUAD_BROADCAST_NONCONST:
- return "subgroupQuadBroadcast";
- case OPTYPE_QUAD_SWAP_HORIZONTAL:
- return "subgroupQuadSwapHorizontal";
- case OPTYPE_QUAD_SWAP_VERTICAL:
- return "subgroupQuadSwapVertical";
- case OPTYPE_QUAD_SWAP_DIAGONAL:
- return "subgroupQuadSwapDiagonal";
+ case OPTYPE_QUAD_BROADCAST: return "subgroupQuadBroadcast";
+ case OPTYPE_QUAD_BROADCAST_NONCONST: return "subgroupQuadBroadcast";
+ case OPTYPE_QUAD_SWAP_HORIZONTAL: return "subgroupQuadSwapHorizontal";
+ case OPTYPE_QUAD_SWAP_VERTICAL: return "subgroupQuadSwapVertical";
+ case OPTYPE_QUAD_SWAP_DIAGONAL: return "subgroupQuadSwapDiagonal";
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-std::string getOpTypeCaseName(int opType)
+string getOpTypeCaseName (OpType opType)
{
switch (opType)
{
- default:
- DE_FATAL("Unsupported op type");
- return "";
- case OPTYPE_QUAD_BROADCAST:
- return "subgroupquadbroadcast";
- case OPTYPE_QUAD_BROADCAST_NONCONST:
- return "subgroupquadbroadcast_nonconst";
- case OPTYPE_QUAD_SWAP_HORIZONTAL:
- return "subgroupquadswaphorizontal";
- case OPTYPE_QUAD_SWAP_VERTICAL:
- return "subgroupquadswapvertical";
- case OPTYPE_QUAD_SWAP_DIAGONAL:
- return "subgroupquadswapdiagonal";
+ case OPTYPE_QUAD_BROADCAST: return "subgroupquadbroadcast";
+ case OPTYPE_QUAD_BROADCAST_NONCONST: return "subgroupquadbroadcast_nonconst";
+ case OPTYPE_QUAD_SWAP_HORIZONTAL: return "subgroupquadswaphorizontal";
+ case OPTYPE_QUAD_SWAP_VERTICAL: return "subgroupquadswapvertical";
+ case OPTYPE_QUAD_SWAP_DIAGONAL: return "subgroupquadswapdiagonal";
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-struct CaseDefinition
-{
- int opType;
- VkShaderStageFlags shaderStage;
- VkFormat format;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
-};
-
-std::string getExtHeader(VkFormat format)
+string getExtHeader (VkFormat format)
{
return "#extension GL_KHR_shader_subgroup_quad: enable\n"
"#extension GL_KHR_shader_subgroup_ballot: enable\n" +
subgroups::getAdditionalExtensionForFormat(format);
}
-std::string getTestSrc(const CaseDefinition &caseDef)
+string getTestSrc (const CaseDefinition &caseDef)
{
- const std::string swapTable[OPTYPE_LAST] = {
+ const string swapTable[OPTYPE_LAST] =
+ {
"",
"",
" const uint swapTable[4] = {1, 0, 3, 2};\n",
" const uint swapTable[4] = {2, 3, 0, 1};\n",
" const uint swapTable[4] = {3, 2, 1, 0};\n",
};
-
- const std::string validate =
+ const string validate =
" if (subgroupBallotBitExtract(mask, otherID) && op !=data[otherID])\n"
" tempRes = 0;\n";
+ const string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
+ const string op = getOpTypeName(caseDef.opType);
+ ostringstream testSrc;
- std::string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
- std::string op = getOpTypeName(caseDef.opType);
-
- std::ostringstream testSrc;
testSrc << " uvec4 mask = subgroupBallot(true);\n"
<< swapTable[caseDef.opType]
<< " tempRes = 1;\n";
return testSrc.str();
}
-void initFrameBufferPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::SpirvVersion spirvVersion = (caseDef.opType == OPTYPE_QUAD_BROADCAST_NONCONST) ? vk::SPIRV_VERSION_1_5 : vk::SPIRV_VERSION_1_3;
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const SpirvVersion spirvVersion = (caseDef.opType == OPTYPE_QUAD_BROADCAST_NONCONST) ? SPIRV_VERSION_1_5 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
- subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, getExtHeader(caseDef.format), getTestSrc(caseDef), "");
+ subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, getExtHeader(caseDef.format), getTestSrc(caseDef), "");
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::SpirvVersion spirvVersion = (caseDef.opType == OPTYPE_QUAD_BROADCAST_NONCONST) ? vk::SPIRV_VERSION_1_5 : vk::SPIRV_VERSION_1_3;
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
-
- std::string extHeader = getExtHeader(caseDef.format);
- std::string testSrc = getTestSrc(caseDef);
+ const bool spirv15required = caseDef.opType == OPTYPE_QUAD_BROADCAST_NONCONST;
+ const bool spirv14required = isAllRayTracingStages(caseDef.shaderStage);
+ const SpirvVersion spirvVersion = spirv15required ? SPIRV_VERSION_1_5
+ : spirv14required ? SPIRV_VERSION_1_4
+ : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef.format);
+ const string testSrc = getTestSrc(caseDef);
subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, *caseDef.geometryPointSizeSupported, extHeader, testSrc, "");
}
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
- {
- if (subgroups::areSubgroupOperationsRequiredForStage(caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
-
subgroups::SSBOData inputData;
inputData.format = caseDef.format;
inputData.layout = subgroups::SSBOData::LayoutStd140;
inputData.numElements = subgroups::maxSupportedSubgroupSize();
inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+ const subgroups::SSBOData inputData
{
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = subgroups::SSBOData::InitializeNonZero;
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ subgroups::SSBOData inputData;
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
- {
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- subgroups::SSBOData inputData;
inputData.format = caseDef.format;
inputData.layout = subgroups::SSBOData::LayoutStd430;
inputData.numElements = subgroups::maxSupportedSubgroupSize();
return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 6u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsQuadTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsQuadTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup arithmetic category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup arithmetic category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup arithmetic category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "quad", "Subgroup quad category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup arithmetic category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup arithmetic category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup arithmetic category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup arithmetic category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
- const std::vector<VkFormat> formats = subgroups::getAllFormats();
-
- for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- const VkFormat format = formats[formatIndex];
+ const vector<VkFormat> formats = subgroups::getAllFormats();
- for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- std::ostringstream name;
- name << getOpTypeCaseName(opTypeIndex);
-
- name << "_" << subgroups::getFormatNameForGLSL(format);
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), name.str(), "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), name.str() + "_requiredsubgroupsize", "", supportedCheck, initPrograms, test, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string name = getOpTypeCaseName(opType) + "_" + formatName;
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
+ {
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testNameSuffix = requiredSubgroupSize ? "_requiredsubgroupsize" : "";
+ const string testName = name + testNameSuffix;
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize, // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ {
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ {
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ }
}
+ }
+ }
+
+ {
+ const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();
+
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
+ {
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- const CaseDefinition caseDef =
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string testName = getOpTypeCaseName(opType) + "_" + formatName;
+ const CaseDefinition caseDef =
{
- opTypeIndex,
- VK_SHADER_STAGE_ALL_GRAPHICS,
- format,
- de::SharedPtr<bool>(new bool),
- DE_FALSE
+ opType, // OpType opType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
};
- addFunctionCaseWithPrograms(graphicGroup.get(), name.str(), "", supportedCheck, initPrograms, test, caseDef);
- }
- for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
- {
- const CaseDefinition caseDef = {opTypeIndex, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(), name.str()+"_"+ getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
}
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "quad", "Subgroup quad category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsQuadTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsQuadTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
using namespace vk;
using namespace vkt;
-string getScanOpName(string prefix, string suffix, Operator op, ScanType scanType)
+string getScanOpName (string prefix, string suffix, Operator op, ScanType scanType)
{
string n;
switch (scanType)
return prefix + n + suffix;
}
-string getOpOperation(Operator op, VkFormat format, string lhs, string rhs)
+string getOpOperation (Operator op, VkFormat format, string lhs, string rhs)
{
switch (op)
{
}
}
-string getIdentity(Operator op, VkFormat format)
+string getIdentity (Operator op, VkFormat format)
{
- const bool isFloat = subgroups::isFormatFloat(format);
- const bool isInt = subgroups::isFormatSigned(format);
- const bool isUnsigned = subgroups::isFormatUnsigned(format);
+ const bool isFloat = subgroups::isFormatFloat(format);
+ const bool isInt = subgroups::isFormatSigned(format);
+ const bool isUnsigned = subgroups::isFormatUnsigned(format);
switch (op)
{
}
}
-string getCompare(Operator op, VkFormat format, string lhs, string rhs)
+string getCompare (Operator op, VkFormat format, string lhs, string rhs)
{
- string formatName = subgroups::getFormatNameForGLSL(format);
- bool isMinMax = (op == OPERATOR_MIN || op == OPERATOR_MAX);
+ const string formatName = subgroups::getFormatNameForGLSL(format);
+ const bool isMinMax = (op == OPERATOR_MIN || op == OPERATOR_MAX);
switch (format)
{
SCAN_EXCLUSIVE
};
-std::string getScanOpName(std::string prefix, std::string suffix, Operator op, ScanType scanType);
-std::string getOpOperation(Operator op, vk::VkFormat format, std::string lhs, std::string rhs);
-std::string getIdentity(Operator op, vk::VkFormat format);
-std::string getCompare(Operator op, vk::VkFormat format, std::string lhs, std::string rhs);
+std::string getScanOpName (std::string prefix, std::string suffix, Operator op, ScanType scanType);
+std::string getOpOperation (Operator op, vk::VkFormat format, std::string lhs, std::string rhs);
+std::string getIdentity (Operator op, vk::VkFormat format);
+std::string getCompare (Operator op, vk::VkFormat format, std::string lhs, std::string rhs);
#endif // _VKTSUBGROUPSSCANHELPERS_HPP
namespace
{
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
-{
- DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 1);
-}
-
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
-{
- DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
-}
-
enum OpType
{
OPTYPE_CLUSTERED = 0,
OPTYPE_LAST
};
-std::string getOpTypeName(int opType)
-{
- switch (opType)
- {
- default:
- DE_FATAL("Unsupported op type");
- return "";
- case OPTYPE_CLUSTERED:
- return "clustered";
- case OPTYPE_QUAD:
- return "quad";
- }
-}
-
struct CaseDefinition
{
- int opType;
+ OpType opType;
VkShaderStageFlags shaderStage;
de::SharedPtr<bool> geometryPointSizeSupported;
deBool requiredSubgroupSize;
};
-void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- std::ostringstream bdy;
- std::string extension = (OPTYPE_CLUSTERED == caseDef.opType) ?
- "#extension GL_KHR_shader_subgroup_clustered: enable\n" :
- "#extension GL_KHR_shader_subgroup_quad: enable\n";
-
- subgroups::setFragmentShaderFrameBuffer(programCollection);
-
- if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage)
- subgroups::setVertexShaderFrameBuffer(programCollection);
+ DE_UNREF(internalData);
- extension += "#extension GL_KHR_shader_subgroup_ballot: enable\n";
+ return subgroups::check(datas, width, 1);
+}
- bdy << " uint tempResult = 0x1;\n"
- << " uvec4 mask = subgroupBallot(true);\n";
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
+{
+ DE_UNREF(internalData);
- if (OPTYPE_CLUSTERED == caseDef.opType)
- {
- for (deUint32 i = 1; i <= subgroups::maxSupportedSubgroupSize(); i *= 2)
- {
- bdy << " if (gl_SubgroupSize >= " << i << ")\n"
- << " {\n"
- << " uvec4 contribution = uvec4(0);\n"
- << " const uint modID = gl_SubgroupInvocationID % 32;\n"
- << " switch (gl_SubgroupInvocationID / 32)\n"
- << " {\n"
- << " case 0: contribution.x = 1 << modID; break;\n"
- << " case 1: contribution.y = 1 << modID; break;\n"
- << " case 2: contribution.z = 1 << modID; break;\n"
- << " case 3: contribution.w = 1 << modID; break;\n"
- << " }\n"
- << " uvec4 result = subgroupClusteredOr(contribution, " << i << ");\n"
- << " uint rootID = gl_SubgroupInvocationID & ~(" << i - 1 << ");\n"
- << " for (uint i = 0; i < " << i << "; i++)\n"
- << " {\n"
- << " uint nextID = rootID + i;\n"
- << " if (subgroupBallotBitExtract(mask, nextID) ^^ subgroupBallotBitExtract(result, nextID))\n"
- << " {\n"
- << " tempResult = 0;\n"
- << " }\n"
- << " }\n"
- << " }\n";
- }
- }
- else
- {
- bdy << " uint cluster[4] =\n"
- << " {\n"
- << " subgroupQuadBroadcast(gl_SubgroupInvocationID, 0),\n"
- << " subgroupQuadBroadcast(gl_SubgroupInvocationID, 1),\n"
- << " subgroupQuadBroadcast(gl_SubgroupInvocationID, 2),\n"
- << " subgroupQuadBroadcast(gl_SubgroupInvocationID, 3)\n"
- << " };\n"
- << " uint rootID = gl_SubgroupInvocationID & ~0x3;\n"
- << " for (uint i = 0; i < 4; i++)\n"
- << " {\n"
- << " uint nextID = rootID + i;\n"
- << " if (subgroupBallotBitExtract(mask, nextID) && (cluster[i] != nextID))\n"
- << " {\n"
- << " tempResult = mask.x;\n"
- << " }\n"
- << " }\n";
- }
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
+}
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- {
- std::ostringstream vertexSrc;
- vertexSrc << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extension
- << "layout(location = 0) in highp vec4 in_position;\n"
- << "layout(location = 0) out float result;\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << bdy.str()
- << " result = float(tempResult);\n"
- << " gl_Position = in_position;\n"
- << " gl_PointSize = 1.0f;\n"
- << "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertexSrc.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- {
- std::ostringstream geometry;
-
- geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extension
- << "layout(points) in;\n"
- << "layout(points, max_vertices = 1) out;\n"
- << "layout(location = 0) out float out_color;\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << bdy.str()
- << " out_color = float(tempResult);\n"
- << " gl_Position = gl_in[0].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << " EmitVertex();\n"
- << " EndPrimitive();\n"
- << "}\n";
-
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- {
- std::ostringstream controlSource;
-
- controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extension
- << "layout(vertices = 2) out;\n"
- << "layout(location = 0) out float out_color[];\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " if (gl_InvocationID == 0)\n"
- <<" {\n"
- << " gl_TessLevelOuter[0] = 1.0f;\n"
- << " gl_TessLevelOuter[1] = 1.0f;\n"
- << " }\n"
- << bdy.str()
- << " out_color[gl_InvocationID] = float(tempResult);\n"
- << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << "}\n";
-
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
- subgroups::setTesEvalShaderFrameBuffer(programCollection);
- }
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- {
- std::ostringstream evaluationSource;
-
- evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extension
- << "layout(isolines, equal_spacing, ccw ) in;\n"
- << "layout(location = 0) out float out_color;\n"
- << "void main (void)\n"
- << "{\n"
- << bdy.str()
- << " out_color = float(tempResult);\n"
- << " gl_Position = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << "}\n";
-
- subgroups::setTesCtrlShaderFrameBuffer(programCollection);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
- }
- else
+string getOpTypeName (const OpType opType)
+{
+ switch (opType)
{
- DE_FATAL("Unsupported shader stage");
+ case OPTYPE_CLUSTERED: return "clustered";
+ case OPTYPE_QUAD: return "quad";
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+string getExtHeader (const CaseDefinition& caseDef)
{
- std::string extension = (OPTYPE_CLUSTERED == caseDef.opType) ?
- "#extension GL_KHR_shader_subgroup_clustered: enable\n" :
- "#extension GL_KHR_shader_subgroup_quad: enable\n";
+ const string testExtensions = (OPTYPE_CLUSTERED == caseDef.opType)
+ ? "#extension GL_KHR_shader_subgroup_clustered: enable\n"
+ : "#extension GL_KHR_shader_subgroup_quad: enable\n";
+ const string extensions = testExtensions
+ + "#extension GL_KHR_shader_subgroup_ballot: enable\n";
- extension += "#extension GL_KHR_shader_subgroup_ballot: enable\n";
+ return extensions;
+}
- std::ostringstream bdy;
+string getBodySource (const CaseDefinition& caseDef)
+{
+ ostringstream bdy;
bdy << " uint tempResult = 0x1;\n"
<< " uvec4 mask = subgroupBallot(true);\n";
<< " }\n";
}
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ bdy << " tempRes = tempResult;\n";
+
+ return bdy.str();
+}
+
+vector<string> getFramebufferPerStageHeadDeclarations (const CaseDefinition& caseDef)
+{
+ vector<string> result;
+
+ DE_UNREF(caseDef);
+
+ result.push_back("layout(location = 0) out float result;\n");
+ result.push_back("layout(location = 0) out float out_color;\n");
+ result.push_back("layout(location = 0) out float out_color[];\n");
+ result.push_back("layout(location = 0) out float out_color;\n");
+
+ return result;
+}
+
+void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getBodySource(caseDef);
+ const vector<string> headDeclarations = getFramebufferPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupported = *caseDef.geometryPointSizeSupported;
+
+ subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupported, extHeader, testSrc, "", headDeclarations);
+}
+
+vector<string> getPerStageHeadDeclarations (const CaseDefinition& caseDef)
+{
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ const bool fragment = (caseDef.shaderStage & VK_SHADER_STAGE_FRAGMENT_BIT) != 0;
+ vector<string> result (stageCount, string());
+
+ if (fragment)
+ result.reserve(result.size() + 1);
+
+ for (size_t i = 0; i < result.size(); ++i)
{
- std::ostringstream src;
-
- src << "#version 450\n"
- << extension
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << bdy.str()
- << " result[offset] = tempResult;\n"
- << "}\n";
-
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ result[i] =
+ "layout(set = 0, binding = " + de::toString(i) + ", std430) buffer Buffer1\n"
+ "{\n"
+ " uint result[];\n"
+ "};\n";
}
- else
- {
- {
- const string vertex =
- "#version 450\n"
- + extension +
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdy.str() +
- " result[gl_VertexIndex] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
-
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- {
- const string tesc =
- "#version 450\n"
- + extension +
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdy.str() +
- " result[gl_PrimitiveID] = 1;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "") +
- "}\n";
-
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+ if (fragment)
+ {
+ const string fragPart =
+ "layout(location = 0) out uint result;\n";
- {
- const string tese =
- "#version 450\n"
- + extension +
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdy.str() +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = 1;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- "}\n";
-
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+ result.push_back(fragPart);
+ }
- {
- const string geometry =
- "#version 450\n"
- + extension +
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdy.str() +
- " result[gl_PrimitiveIDIn] = tempResult;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
-
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- }
+ return result;
+}
- {
- const string fragment =
- "#version 450\n"
- + extension +
- "layout(location = 0) out uint result;\n"
- "void main (void)\n"
- "{\n"
- + bdy.str() +
- " result = tempResult;\n"
- "}\n";
-
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- subgroups::addNoSubgroupShader(programCollection);
- }
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getBodySource(caseDef);
+ const vector<string> headDeclarations = getPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupport = *caseDef.geometryPointSizeSupported;
+
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupport, extHeader, testSrc, "", headDeclarations);
}
void supportedCheck (Context& context, CaseDefinition caseDef)
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
-
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
+ switch (caseDef.shaderStage)
{
- if (subgroups::areSubgroupOperationsRequiredForStage(
- caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
}
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::isSubgroupFeatureSupportedForDevice(context, VK_SUBGROUP_FEATURE_BASIC_BIT))
+ if (isAllComputeStages(caseDef.shaderStage))
{
- return tcu::TestStatus::fail(
- "Subgroup feature " +
- subgroups::getShaderStageName(VK_SUBGROUP_FEATURE_BASIC_BIT) +
- " is a required capability!");
- }
-
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkCompute,
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkCompute,
size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
- {
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
return subgroups::allStages(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, DE_NULL, 0, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsShapeTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsShapeTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup shape category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup shape category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup shape category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "shape", "Subgroup shape category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup shape category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup shape category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup shape category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup shape category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- const std::string op = de::toLower(getOpTypeName(opTypeIndex));
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string op = de::toLower(getOpTypeName(opType));
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
+ {
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = op + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
{
- CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_COMPUTE_BIT, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), op, "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), op + "_requiredsubgroupsize", "", supportedCheck, initPrograms, test, caseDef);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroup.get(), op, "", supportedCheck, initPrograms, test, caseDef);
}
{
- const CaseDefinition caseDef =
+ const CaseDefinition caseDef =
{
- opTypeIndex,
- VK_SHADER_STAGE_ALL_GRAPHICS,
- de::SharedPtr<bool>(new bool),
- DE_FALSE
+ opType, // OpType opType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
};
- addFunctionCaseWithPrograms(graphicGroup.get(),
- op, "",
- supportedCheck, initPrograms, test, caseDef);
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), op, "", supportedCheck, initPrograms, test, caseDef);
}
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
- const CaseDefinition caseDef = {opTypeIndex, stages[stageIndex], de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(),op + "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = op + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "shape", "Subgroup shape category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsShapeTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsShapeTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
OPTYPE_LAST
};
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+struct CaseDefinition
+{
+ OpType opType;
+ VkShaderStageFlags shaderStage;
+ VkFormat format;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool requiredSubgroupSize;
+};
+
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 1);
+
+ return subgroups::check(datas, width, 1);
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 1);
}
-std::string getOpTypeName(int opType)
+string getOpTypeName (OpType opType)
{
switch (opType)
{
- default:
- DE_FATAL("Unsupported op type");
- return "";
- case OPTYPE_SHUFFLE:
- return "subgroupShuffle";
- case OPTYPE_SHUFFLE_XOR:
- return "subgroupShuffleXor";
- case OPTYPE_SHUFFLE_UP:
- return "subgroupShuffleUp";
- case OPTYPE_SHUFFLE_DOWN:
- return "subgroupShuffleDown";
+ case OPTYPE_SHUFFLE: return "subgroupShuffle";
+ case OPTYPE_SHUFFLE_XOR: return "subgroupShuffleXor";
+ case OPTYPE_SHUFFLE_UP: return "subgroupShuffleUp";
+ case OPTYPE_SHUFFLE_DOWN: return "subgroupShuffleDown";
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-struct CaseDefinition
+string getExtHeader (const CaseDefinition& caseDef)
{
- int opType;
- VkShaderStageFlags shaderStage;
- VkFormat format;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
-};
+ const string eSource = (OPTYPE_SHUFFLE == caseDef.opType || OPTYPE_SHUFFLE_XOR == caseDef.opType)
+ ? "#extension GL_KHR_shader_subgroup_shuffle: enable\n"
+ : "#extension GL_KHR_shader_subgroup_shuffle_relative: enable\n";
+
+ return eSource
+ + "#extension GL_KHR_shader_subgroup_ballot: enable\n"
+ + subgroups::getAdditionalExtensionForFormat(caseDef.format);
+}
+
+vector<string> getPerStageHeadDeclarations (const CaseDefinition& caseDef)
+{
+ const string formatName = subgroups::getFormatNameForGLSL(caseDef.format);
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ const bool fragment = (caseDef.shaderStage & VK_SHADER_STAGE_FRAGMENT_BIT) != 0;
+ const size_t resultSize = stageCount + (fragment ? 1 : 0);
+ vector<string> result (resultSize, string());
+
+ for (deUint32 i = 0; i < result.size(); ++i)
+ {
+ const deUint32 binding0 = i;
+ const deUint32 binding1 = stageCount;
+ const deUint32 binding2 = stageCount + 1;
+ const string buffer1 = (i == stageCount)
+ ? "layout(location = 0) out uint result;\n"
+ : "layout(set = 0, binding = " + de::toString(binding0) + ", std430) buffer Buffer1\n"
+ "{\n"
+ " uint result[];\n"
+ "};\n";
+
+ result[i] =
+ buffer1 +
+ "layout(set = 0, binding = " + de::toString(binding1) + ", std430) readonly buffer Buffer2\n"
+ "{\n"
+ " " + formatName + " data1[];\n"
+ "};\n"
+ "layout(set = 0, binding = " + de::toString(binding2) + ", std430) readonly buffer Buffer3\n"
+ "{\n"
+ " uint data2[];\n"
+ "};\n";
+ }
-const std::string to_string(int x) {
- std::ostringstream oss;
- oss << x;
- return oss.str();
+ return result;
}
-const std::string DeclSource(CaseDefinition caseDef, int baseBinding)
+vector<string> getFramebufferPerStageHeadDeclarations (const CaseDefinition& caseDef)
{
- return
- "layout(set = 0, binding = " + to_string(baseBinding) + ", std430) readonly buffer Buffer2\n"
+ const string formatName = subgroups::getFormatNameForGLSL(caseDef.format);
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ vector<string> result (stageCount, string());
+ const string buffer2
+ {
+ "layout(set = 0, binding = 0) uniform Buffer1\n"
"{\n"
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " data1[];\n"
+ " " + formatName + " data1[" + de::toString(subgroups::maxSupportedSubgroupSize()) + "];\n"
"};\n"
- "layout(set = 0, binding = " + to_string(baseBinding + 1) + ", std430) readonly buffer Buffer3\n"
+ "layout(set = 0, binding = 1) uniform Buffer2\n"
"{\n"
- " uint data2[];\n"
- "};\n";
+ " uint data2[" + de::toString(subgroups::maxSupportedSubgroupSize()) + "];\n"
+ "};\n"
+ };
+
+ for (size_t i = 0; i < result.size(); ++i)
+ {
+ switch (i)
+ {
+ case 0: result[i] = "layout(location = 0) out float result;\n" + buffer2; break;
+ case 1: result[i] = "layout(location = 0) out float out_color;\n" + buffer2; break;
+ case 2: result[i] = "layout(location = 0) out float out_color[];\n" + buffer2; break;
+ case 3: result[i] = "layout(location = 0) out float out_color;\n" + buffer2; break;
+ default: TCU_THROW(InternalError, "Unknown stage");
+ }
+ }
+
+ return result;
}
-const std::string TestSource(CaseDefinition caseDef)
+const string getTestSource (const CaseDefinition& caseDef)
{
- std::string idTable[OPTYPE_LAST];
- idTable[OPTYPE_SHUFFLE] = "id_in";
- idTable[OPTYPE_SHUFFLE_XOR] = "gl_SubgroupInvocationID ^ id_in";
- idTable[OPTYPE_SHUFFLE_UP] = "gl_SubgroupInvocationID - id_in";
- idTable[OPTYPE_SHUFFLE_DOWN] = "gl_SubgroupInvocationID + id_in";
-
- const std::string testSource =
+ const string id = caseDef.opType == OPTYPE_SHUFFLE ? "id_in"
+ : caseDef.opType == OPTYPE_SHUFFLE_XOR ? "gl_SubgroupInvocationID ^ id_in"
+ : caseDef.opType == OPTYPE_SHUFFLE_UP ? "gl_SubgroupInvocationID - id_in"
+ : caseDef.opType == OPTYPE_SHUFFLE_DOWN ? "gl_SubgroupInvocationID + id_in"
+ : "";
+ const string testSource =
" uint temp_res;\n"
" uvec4 mask = subgroupBallot(true);\n"
" uint id_in = data2[gl_SubgroupInvocationID] & (gl_SubgroupSize - 1);\n"
" " + subgroups::getFormatNameForGLSL(caseDef.format) + " op = "
+ getOpTypeName(caseDef.opType) + "(data1[gl_SubgroupInvocationID], id_in);\n"
- " uint id = " + idTable[caseDef.opType] + ";\n"
+ " uint id = " + id + ";\n"
" if ((id < gl_SubgroupSize) && subgroupBallotBitExtract(mask, id))\n"
" {\n"
" temp_res = (op == data1[id]) ? 1 : 0;\n"
		"  }\n"
		"  else\n"
" {\n"
" temp_res = 1; // Invocation we read from was inactive, so we can't verify results!\n"
- " }\n";
+ " }\n"
+ " tempRes = temp_res;\n";
return testSource;
}
void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
-
- subgroups::setFragmentShaderFrameBuffer(programCollection);
-
- if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage)
- subgroups::setVertexShaderFrameBuffer(programCollection);
-
- const std::string extSource =
- (OPTYPE_SHUFFLE == caseDef.opType || OPTYPE_SHUFFLE_XOR == caseDef.opType) ?
- "#extension GL_KHR_shader_subgroup_shuffle: enable\n" :
- "#extension GL_KHR_shader_subgroup_shuffle_relative: enable\n";
-
- const std::string testSource = TestSource(caseDef);
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSource(caseDef);
+ const vector<string> headDeclarations = getFramebufferPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupported = *caseDef.geometryPointSizeSupported;
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- {
- std::ostringstream vertexSrc;
- vertexSrc << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << "layout(location = 0) in highp vec4 in_position;\n"
- << "layout(location = 0) out float result;\n"
- << extSource
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data1[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "layout(set = 0, binding = 1) uniform Buffer2\n"
- << "{\n"
- << " uint data2[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << testSource
- << " result = temp_res;\n"
- << " gl_Position = in_position;\n"
- << " gl_PointSize = 1.0f;\n"
- << "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertexSrc.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- {
- std::ostringstream geometry;
-
- geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extSource
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(points) in;\n"
- << "layout(points, max_vertices = 1) out;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data1[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "layout(set = 0, binding = 1) uniform Buffer2\n"
- << "{\n"
- << " uint data2[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << testSource
- << " out_color = temp_res;\n"
- << " gl_Position = gl_in[0].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << " EmitVertex();\n"
- << " EndPrimitive();\n"
- << "}\n";
-
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- {
- std::ostringstream controlSource;
-
- controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extSource
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(vertices = 2) out;\n"
- << "layout(location = 0) out float out_color[];\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data1[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "layout(set = 0, binding = 1) uniform Buffer2\n"
- << "{\n"
- << " uint data2[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " if (gl_InvocationID == 0)\n"
- <<" {\n"
- << " gl_TessLevelOuter[0] = 1.0f;\n"
- << " gl_TessLevelOuter[1] = 1.0f;\n"
- << " }\n"
- << testSource
- << " out_color[gl_InvocationID] = temp_res;\n"
- << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "")
- << "}\n";
-
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
- subgroups::setTesEvalShaderFrameBuffer(programCollection);
-
- }
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- {
- std::ostringstream evaluationSource;
- evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extSource
- << "#extension GL_KHR_shader_subgroup_ballot: enable\n"
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(isolines, equal_spacing, ccw ) in;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data1[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "layout(set = 0, binding = 1) uniform Buffer2\n"
- << "{\n"
- << " uint data2[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << testSource
- << " out_color = temp_res;\n"
- << " gl_Position = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << "}\n";
-
- subgroups::setTesCtrlShaderFrameBuffer(programCollection);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
- }
- else
- {
- DE_FATAL("Unsupported shader stage");
- }
+ subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupported, extHeader, testSrc, "", headDeclarations);
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const std::string vSource =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_ballot: enable\n";
- const std::string eSource =
- (OPTYPE_SHUFFLE == caseDef.opType || OPTYPE_SHUFFLE_XOR == caseDef.opType) ?
- "#extension GL_KHR_shader_subgroup_shuffle: enable\n" :
- "#extension GL_KHR_shader_subgroup_shuffle_relative: enable\n";
- const std::string extSource = vSource + eSource + subgroups::getAdditionalExtensionForFormat(caseDef.format);
-
- const std::string testSource = TestSource(caseDef);
-
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- std::ostringstream src;
-
- src << extSource
- << "layout (local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << DeclSource(caseDef, 1)
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << testSource
- << " result[offset] = temp_res;\n"
- << "}\n";
-
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else
- {
- const std::string declSource = DeclSource(caseDef, 4);
-
- {
- const string vertex =
- extSource +
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- + declSource +
- "\n"
- "void main (void)\n"
- "{\n"
- + testSource +
- " result[gl_VertexIndex] = temp_res;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
-
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const string tesc =
- extSource +
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- + declSource +
- "\n"
- "void main (void)\n"
- "{\n"
- + testSource +
- " result[gl_PrimitiveID] = temp_res;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "") +
- "}\n";
-
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const string tese =
- extSource +
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- + declSource +
- "\n"
- "void main (void)\n"
- "{\n"
- + testSource +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = temp_res;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- "}\n";
-
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const string geometry =
- extSource +
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- + declSource +
- "\n"
- "void main (void)\n"
- "{\n"
- + testSource +
- " result[gl_PrimitiveIDIn] = temp_res;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
-
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- }
- {
- const string fragment =
- extSource +
- "layout(location = 0) out uint result;\n"
- + declSource +
- "void main (void)\n"
- "{\n"
- + testSource +
- " result = temp_res;\n"
- "}\n";
-
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- subgroups::addNoSubgroupShader(programCollection);
- }
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSource(caseDef);
+ const vector<string> headDeclarations = getPerStageHeadDeclarations(caseDef);
+ const bool pointSizeSupported = *caseDef.geometryPointSizeSupported;
+
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, pointSizeSupported, extHeader, testSrc, "", headDeclarations);
}
void supportedCheck (Context& context, CaseDefinition caseDef)
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
-
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
+ const subgroups::SSBOData inputData[2]
{
- if (subgroups::areSubgroupOperationsRequiredForStage(
- caseDef.shaderStage))
{
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ },
{
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
}
- }
+ };
- subgroups::SSBOData inputData[2];
- inputData[0].format = caseDef.format;
- inputData[0].layout = subgroups::SSBOData::LayoutStd140;
- inputData[0].numElements = subgroups::maxSupportedSubgroupSize();
- inputData[0].initializeType = subgroups::SSBOData::InitializeNonZero;
-
- inputData[1].format = VK_FORMAT_R32_UINT;
- inputData[1].layout = subgroups::SSBOData::LayoutStd140;
- inputData[1].numElements = inputData[0].numElements;
- inputData[1].initializeType = subgroups::SSBOData::InitializeNonZero;
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+ const subgroups::SSBOData inputData[2]
{
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- subgroups::SSBOData inputData[2];
- inputData[0].format = caseDef.format;
- inputData[0].layout = subgroups::SSBOData::LayoutStd430;
- inputData[0].numElements = subgroups::maxSupportedSubgroupSize();
- inputData[0].initializeType = subgroups::SSBOData::InitializeNonZero;
-
- inputData[1].format = VK_FORMAT_R32_UINT;
- inputData[1].layout = subgroups::SSBOData::LayoutStd430;
- inputData[1].numElements = inputData[0].numElements;
- inputData[1].initializeType = subgroups::SSBOData::InitializeNonZero;
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ },
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ },
+ };
if (caseDef.requiredSubgroupSize == DE_FALSE)
return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkCompute);
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkCompute,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkCompute,
+ size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
-
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData[2]
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- subgroups::SSBOData inputData[2];
- inputData[0].format = caseDef.format;
- inputData[0].layout = subgroups::SSBOData::LayoutStd430;
- inputData[0].numElements = subgroups::maxSupportedSubgroupSize();
- inputData[0].initializeType = subgroups::SSBOData::InitializeNonZero;
- inputData[0].binding = 4u;
- inputData[0].stages = stages;
-
- inputData[1].format = VK_FORMAT_R32_UINT;
- inputData[1].layout = subgroups::SSBOData::LayoutStd430;
- inputData[1].numElements = inputData[0].numElements;
- inputData[1].initializeType = subgroups::SSBOData::InitializeNonZero;
- inputData[1].binding = 5u;
- inputData[1].stages = stages;
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 4u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ },
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 5u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ },
+ };
return subgroups::allStages(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData[2]
+ {
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 6u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ },
+ {
+ subgroups::SSBOData::InitializeNonZero, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ VK_FORMAT_R32_UINT, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 7u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ },
+ };
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, inputData, 2, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsShuffleTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsShuffleTests (TestContext& testCtx)
{
-
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup shuffle category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup shuffle category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup shuffle category tests: framebuffer"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "shuffle", "Subgroup shuffle category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup shuffle category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup shuffle category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup shuffle category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup shuffle category tests: ray tracing"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
VK_SHADER_STAGE_GEOMETRY_BIT,
};
+ const deBool boolValues[] =
+ {
+ DE_FALSE,
+ DE_TRUE
+ };
- const std::vector<VkFormat> formats = subgroups::getAllFormats();
-
- for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- const VkFormat format = formats[formatIndex];
+ const vector<VkFormat> formats = subgroups::getAllFormats();
- for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
- const string name =
- de::toLower(getOpTypeName(opTypeIndex)) +
- "_" + subgroups::getFormatNameForGLSL(format);
-
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- const CaseDefinition caseDef =
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string name = de::toLower(getOpTypeName(opType)) + "_" + formatName;
+
{
- opTypeIndex,
- VK_SHADER_STAGE_ALL_GRAPHICS,
- format,
- de::SharedPtr<bool>(new bool),
- DE_FALSE
- };
- addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
+ {
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize, // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(computeGroup.get(), testName, "", supportedCheck, initPrograms, test, caseDef);
+ }
+
+ for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ {
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+ const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroup.get(), testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ }
}
+ }
+ }
- {
- CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(computeGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(), name + "_requiredsubgroupsize", "", supportedCheck, initPrograms, test, caseDef);
- }
+ {
+ const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();
+
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
+ {
+ const VkFormat format = formats[formatIndex];
+ const string formatName = subgroups::getFormatNameForGLSL(format);
- for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- const CaseDefinition caseDef = {opTypeIndex, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE};
- addFunctionCaseWithPrograms(framebufferGroup.get(), name + "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+ const string name = de::toLower(getOpTypeName(opType)) + "_" + formatName;
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE // deBool requiredSubgroupSize;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
}
}
}
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "shuffle", "Subgroup shuffle category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsShuffleTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsShuffleTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
struct CaseDefinition
{
- deUint32 pipelineShaderStageCreateFlags;
- VkShaderStageFlags shaderStage;
- deBool requiresBallot;
- deUint32 requiredSubgroupSizeMode;
+ deUint32 pipelineShaderStageCreateFlags;
+ VkShaderStageFlags shaderStage;
+ deBool requiresBallot;
+ deUint32 requiredSubgroupSizeMode;
};
struct internalDataStruct
}
else
{
- deUint32 greater = std::max(a, b);
- deUint32 lesser = std::min(a, b);
+ deUint32 greater = max(a, b);
+ deUint32 lesser = min(a, b);
return gcd(lesser, greater % lesser);
}
}
-void getLocalSizes (VkPhysicalDeviceProperties physicalDeviceProperties, deUint32 numWorkGroupInvocations,
- deUint32& localSizeX, deUint32& localSizeY, deUint32& localSizeZ)
+UVec3 getLocalSizes (const VkPhysicalDeviceProperties& physicalDeviceProperties,
+ deUint32 numWorkGroupInvocations)
{
DE_ASSERT(numWorkGroupInvocations <= physicalDeviceProperties.limits.maxComputeWorkGroupInvocations);
- localSizeX = gcd(numWorkGroupInvocations, physicalDeviceProperties.limits.maxComputeWorkGroupSize[0]);
- localSizeY = gcd(deMax32(numWorkGroupInvocations / localSizeX, 1u), physicalDeviceProperties.limits.maxComputeWorkGroupSize[1]);
- localSizeZ = deMax32(numWorkGroupInvocations / (localSizeX * localSizeY), 1u);
+ const deUint32 localSizeX = gcd(numWorkGroupInvocations, physicalDeviceProperties.limits.maxComputeWorkGroupSize[0]);
+ const deUint32 localSizeY = gcd(deMax32(numWorkGroupInvocations / localSizeX, 1u), physicalDeviceProperties.limits.maxComputeWorkGroupSize[1]);
+ const deUint32 localSizeZ = deMax32(numWorkGroupInvocations / (localSizeX * localSizeY), 1u);
+
+ return UVec3(localSizeX, localSizeY, localSizeZ);
}
-deUint32 getRequiredSubgroupSizeFromMode (Context &context, const CaseDefinition caseDef,
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties)
+deUint32 getRequiredSubgroupSizeFromMode (Context& context,
+ const CaseDefinition& caseDef,
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties)
{
switch (caseDef.requiredSubgroupSizeMode)
{
- case REQUIRED_SUBGROUP_SIZE_MAX: return subgroupSizeControlProperties.maxSubgroupSize;
- case REQUIRED_SUBGROUP_SIZE_MIN: return subgroupSizeControlProperties.minSubgroupSize;
- case REQUIRED_SUBGROUP_SIZE_NONE: return vkt::subgroups::getSubgroupSize(context);
- default: TCU_THROW(NotSupportedError, "Unsupported Subgroup size");
+ case REQUIRED_SUBGROUP_SIZE_MAX: return subgroupSizeControlProperties.maxSubgroupSize;
+ case REQUIRED_SUBGROUP_SIZE_MIN: return subgroupSizeControlProperties.minSubgroupSize;
+ case REQUIRED_SUBGROUP_SIZE_NONE: return subgroups::getSubgroupSize(context);
+ default: TCU_THROW(NotSupportedError, "Unsupported Subgroup size");
}
}
-static bool checkVertexPipelineStages (const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
- const struct internalDataStruct *checkInternalData = reinterpret_cast<const struct internalDataStruct *>(internalData);
- const Context *context = checkInternalData->context;
- tcu::TestLog& log = context->getTestContext().getLog();
-
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context->getInstanceInterface().getPhysicalDeviceProperties2(context->getPhysicalDevice(), &properties);
- const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
+ const struct internalDataStruct* checkInternalData = reinterpret_cast<const struct internalDataStruct *>(internalData);
+ const Context* context = checkInternalData->context;
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context->getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context->getTestContext().getLog();
+ const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
for (deUint32 i = 0; i < width; i++)
{
if (data[i] > subgroupSizeControlProperties.maxSubgroupSize ||
data[i] < subgroupSizeControlProperties.minSubgroupSize)
{
- log << tcu::TestLog::Message << "gl_SubgroupSize (" << data[i] << ") value is outside limits (" << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize << ")" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "gl_SubgroupSize (" << data[i] << ") value is outside limits (" << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize << ")" << TestLog::EndMessage;
+
return DE_FALSE;
}
if (checkInternalData->caseDef.requiredSubgroupSizeMode != REQUIRED_SUBGROUP_SIZE_NONE && data[i] != checkInternalData->requiredSubgroupSize)
{
- log << tcu::TestLog::Message << "gl_SubgroupSize (" << data[i] << ") is not equal to the required subgroup size value (" << checkInternalData->requiredSubgroupSize << ")" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "gl_SubgroupSize (" << data[i] << ") is not equal to the required subgroup size value (" << checkInternalData->requiredSubgroupSize << ")" << TestLog::EndMessage;
+
return DE_FALSE;
}
}
return DE_TRUE;
}
-static bool checkFragmentPipelineStages (const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32 height, deUint32)
+static bool checkFragmentPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32 height,
+ deUint32)
{
- const struct internalDataStruct *checkInternalData = reinterpret_cast<const struct internalDataStruct *>(internalData);
- const Context *context = checkInternalData->context;
- tcu::TestLog& log = context->getTestContext().getLog();
-
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
- context->getInstanceInterface().getPhysicalDeviceProperties2(context->getPhysicalDevice(), &properties);
-
- const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
+ const struct internalDataStruct* checkInternalData = reinterpret_cast<const struct internalDataStruct *>(internalData);
+ const Context* context = checkInternalData->context;
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context->getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context->getTestContext().getLog();
+ const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
for (deUint32 x = 0u; x < width; ++x)
{
if (data[ndx] > subgroupSizeControlProperties.maxSubgroupSize ||
data[ndx] < subgroupSizeControlProperties.minSubgroupSize)
{
- log << tcu::TestLog::Message << "gl_SubgroupSize (" << data[ndx] << ") value is outside limits (" << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize << ")" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "gl_SubgroupSize (" << data[ndx] << ") value is outside limits (" << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize << ")" << TestLog::EndMessage;
+
return DE_FALSE;
}
if (checkInternalData->caseDef.requiredSubgroupSizeMode != REQUIRED_SUBGROUP_SIZE_NONE &&
data[ndx] != checkInternalData->requiredSubgroupSize)
{
- log << tcu::TestLog::Message << "gl_SubgroupSize (" << data[ndx] << ") is not equal to the required subgroup size value (" << checkInternalData->requiredSubgroupSize << ")" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "gl_SubgroupSize (" << data[ndx] << ") is not equal to the required subgroup size value (" << checkInternalData->requiredSubgroupSize << ")" << TestLog::EndMessage;
+
return DE_FALSE;
}
}
return true;
}
-static bool checkCompute (const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
deUint32)
{
- const struct internalDataStruct *checkInternalData = reinterpret_cast<const struct internalDataStruct *>(internalData);
- const Context *context = checkInternalData->context;
- tcu::TestLog& log = context->getTestContext().getLog();
-
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
- context->getInstanceInterface().getPhysicalDeviceProperties2(context->getPhysicalDevice(), &properties);
-
- const deUint32 globalSizeX = numWorkgroups[0] * localSize[0];
- const deUint32 globalSizeY = numWorkgroups[1] * localSize[1];
- const deUint32 globalSizeZ = numWorkgroups[2] * localSize[2];
- const deUint32 width = globalSizeX * globalSizeY * globalSizeZ;
- const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
+ const struct internalDataStruct* checkInternalData = reinterpret_cast<const struct internalDataStruct *>(internalData);
+ const Context* context = checkInternalData->context;
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context->getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context->getTestContext().getLog();
+ const deUint32 globalSizeX = numWorkgroups[0] * localSize[0];
+ const deUint32 globalSizeY = numWorkgroups[1] * localSize[1];
+ const deUint32 globalSizeZ = numWorkgroups[2] * localSize[2];
+ const deUint32 width = globalSizeX * globalSizeY * globalSizeZ;
+ const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
for (deUint32 i = 0; i < width; i++)
{
if (data[i] > subgroupSizeControlProperties.maxSubgroupSize ||
data[i] < subgroupSizeControlProperties.minSubgroupSize)
{
- log << tcu::TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
- << "gl_SubgroupSize (" << data[i] << ") value is outside limits (" << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize << ")" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
+ << "gl_SubgroupSize (" << data[i] << ") value is outside limits (" << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize << ")" << TestLog::EndMessage;
+
return DE_FALSE;
}
if (checkInternalData->caseDef.requiredSubgroupSizeMode != REQUIRED_SUBGROUP_SIZE_NONE &&
data[i] != checkInternalData->requiredSubgroupSize)
{
- log << tcu::TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
- << "gl_SubgroupSize (" << data[i] << ") is not equal to the required subgroup size value (" << checkInternalData->requiredSubgroupSize << ")" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
+ << "gl_SubgroupSize (" << data[i] << ") is not equal to the required subgroup size value (" << checkInternalData->requiredSubgroupSize << ")" << TestLog::EndMessage;
+
return DE_FALSE;
}
}
return DE_TRUE;
}
-static bool checkComputeRequireFull (const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
+static bool checkComputeRequireFull (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
deUint32)
{
- const struct internalDataStruct *checkInternalData = reinterpret_cast<const struct internalDataStruct *>(internalData);
- const Context *context = checkInternalData->context;
- tcu::TestLog& log = context->getTestContext().getLog();
-
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = &subgroupSizeControlProperties;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context->getInstanceInterface().getPhysicalDeviceProperties2(context->getPhysicalDevice(), &properties);
-
- const deUint32 globalSizeX = numWorkgroups[0] * localSize[0];
- const deUint32 globalSizeY = numWorkgroups[1] * localSize[1];
- const deUint32 globalSizeZ = numWorkgroups[2] * localSize[2];
- const deUint32 width = globalSizeX * globalSizeY * globalSizeZ;
- const UVec4* data = reinterpret_cast<const UVec4*>(datas[0]);
-
- deUint32 numSubgroups = (localSize[0] * localSize[1] * localSize[2]) / checkInternalData->requiredSubgroupSize;
+ const struct internalDataStruct* checkInternalData = reinterpret_cast<const struct internalDataStruct *>(internalData);
+ const Context* context = checkInternalData->context;
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context->getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context->getTestContext().getLog();
+ const deUint32 globalSizeX = numWorkgroups[0] * localSize[0];
+ const deUint32 globalSizeY = numWorkgroups[1] * localSize[1];
+ const deUint32 globalSizeZ = numWorkgroups[2] * localSize[2];
+ const deUint32 width = globalSizeX * globalSizeY * globalSizeZ;
+ const UVec4* data = reinterpret_cast<const UVec4*>(datas[0]);
+ const deUint32 numSubgroups = (localSize[0] * localSize[1] * localSize[2]) / checkInternalData->requiredSubgroupSize;
for (deUint32 i = 0; i < width; i++)
{
if (data[i].x() > subgroupSizeControlProperties.maxSubgroupSize ||
data[i].x() < subgroupSizeControlProperties.minSubgroupSize)
{
- log << tcu::TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
- << "gl_SubgroupSize value ( " << data[i].x() << ") is outside limits [" << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
+ << "gl_SubgroupSize value ( " << data[i].x() << ") is outside limits [" << subgroupSizeControlProperties.minSubgroupSize << ", " << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
return DE_FALSE;
}
if (data[i].x() != data[i].y())
{
- log << tcu::TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
- << "gl_SubgroupSize ( " << data[i].x() << ") does not match the active number of subgroup invocations (" << data[i].y() << ")" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
+ << "gl_SubgroupSize ( " << data[i].x() << ") does not match the active number of subgroup invocations (" << data[i].y() << ")" << TestLog::EndMessage;
return DE_FALSE;
}
if (checkInternalData->caseDef.pipelineShaderStageCreateFlags == VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT &&
data[i].x() != checkInternalData->requiredSubgroupSize)
{
- log << tcu::TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
- << "expected subgroupSize (" << checkInternalData->requiredSubgroupSize << ") doesn't match gl_SubgroupSize ( " << data[i].x() << ")" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
+ << "expected subgroupSize (" << checkInternalData->requiredSubgroupSize << ") doesn't match gl_SubgroupSize ( " << data[i].x() << ")" << TestLog::EndMessage;
return DE_FALSE;
}
if (checkInternalData->caseDef.pipelineShaderStageCreateFlags == VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT && data[i].z() != numSubgroups)
{
- log << tcu::TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
+ log << TestLog::Message << "[" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << "] "
<< "expected number of subgroups dispatched (" << numSubgroups << ") doesn't match gl_NumSubgroups (" << data[i].z() << ")";
return DE_FALSE;
}
void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
if (VK_SHADER_STAGE_FRAGMENT_BIT != caseDef.shaderStage)
subgroups::setFragmentShaderFrameBuffer(programCollection);
if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage && VK_SHADER_STAGE_FRAGMENT_BIT != caseDef.shaderStage)
subgroups::setVertexShaderFrameBuffer(programCollection);
- std::string bdyStr = "uint tempResult = gl_SubgroupSize;\n";
+ string bdyStr = "uint tempResult = gl_SubgroupSize;\n";
if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
{
- std::ostringstream vertex;
+ ostringstream vertex;
+
vertex << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< "layout(location = 0) in highp vec4 in_position;\n"
<< " gl_Position = in_position;\n"
<< " gl_PointSize = 1.0f;\n"
<< "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex.str()) << buildOptions;
+
+ programCollection.glslSources.add("vert") << glu::VertexSource(vertex.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
{
- std::ostringstream geometry;
+ ostringstream geometry;
geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< " EndPrimitive();\n"
<< "}\n";
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
+ programCollection.glslSources.add("geometry") << glu::GeometrySource(geometry.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
{
- std::ostringstream controlSource;
+ ostringstream controlSource;
controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
<< "}\n";
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
+ programCollection.glslSources.add("tesc") << glu::TessellationControlSource(controlSource.str()) << buildOptions;
subgroups::setTesEvalShaderFrameBuffer(programCollection);
}
else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
{
- std::ostringstream evaluationSource;
+ ostringstream evaluationSource;
evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< "layout(isolines, equal_spacing, ccw ) in;\n"
<< "}\n";
subgroups::setTesCtrlShaderFrameBuffer(programCollection);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
+ programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
}
else if (VK_SHADER_STAGE_FRAGMENT_BIT == caseDef.shaderStage)
{
"}\n";
programCollection.glslSources.add("vert") << glu::VertexSource(vertex) << buildOptions;
- std::ostringstream fragmentSource;
+ ostringstream fragmentSource;
+
fragmentSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
<< "precision highp int;\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< bdyStr
<< " out_color = tempResult;\n"
<< "}\n";
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragmentSource.str()) << buildOptions;
+
+ programCollection.glslSources.add("fragment") << glu::FragmentSource(fragmentSource.str()) << buildOptions;
}
else
{
}
}
-void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+string getExtHeader (const CaseDefinition&)
{
- std::string bdyStr = " uint tempResult = gl_SubgroupSize;\n";
+ return "#extension GL_KHR_shader_subgroup_basic: enable\n";
+}
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- std::ostringstream src;
+vector<string> getPerStageHeadDeclarations (const CaseDefinition& caseDef)
+{
+ const deUint32 stageCount = subgroups::getStagesCount(caseDef.shaderStage);
+ const bool fragment = (caseDef.shaderStage & VK_SHADER_STAGE_FRAGMENT_BIT) != 0;
+ vector<string> result (stageCount, string());
- src << "#version 450\n"
- << "#extension GL_KHR_shader_subgroup_basic: enable\n"
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << bdyStr
- << " result[offset] = tempResult;\n"
- << "}\n";
+ if (fragment)
+ result.reserve(result.size() + 1);
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else
+ for (size_t i = 0; i < result.size(); ++i)
{
- const string vertex =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
+ result[i] =
+ "layout(set = 0, binding = " + de::toString(i) + ", std430) buffer Buffer1\n"
"{\n"
" uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_VertexIndex] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
+ "};\n";
+ }
- const string tesc =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveID] = tempResult;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- "}\n";
+ if (fragment)
+ {
+ const string fragPart =
+ "layout(location = 0) out uint result;\n";
- const string tese =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempResult;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- "}\n";
+ result.push_back(fragPart);
+ }
- const string geometry =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result[gl_PrimitiveIDIn] = tempResult;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- " gl_PointSize = 1.0f;\n"
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
+ return result;
+}
- const string fragment =
- "#version 450\n"
- "#extension GL_KHR_shader_subgroup_basic: enable\n"
- "layout(location = 0) out uint result;\n"
- "void main (void)\n"
- "{\n"
- + bdyStr +
- " result = tempResult;\n"
- "}\n";
+string getTestSource (const CaseDefinition&)
+{
+ return
+ " uint tempResult = gl_SubgroupSize;\n"
+ " tempRes = tempResult;\n";
+}
- subgroups::addNoSubgroupShader(programCollection);
-
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const string extHeader = getExtHeader(caseDef);
+ const string testSrc = getTestSource(caseDef);
+ const vector<string> headDeclarations = getPerStageHeadDeclarations(caseDef);
+
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, VK_FORMAT_R32_UINT, false, extHeader, testSrc, "", headDeclarations);
}
void initProgramsRequireFull (SourceCollections& programCollection, CaseDefinition caseDef)
if (VK_SHADER_STAGE_COMPUTE_BIT != caseDef.shaderStage)
DE_FATAL("Unsupported shader stage");
- std::string bdyStr = " uint tempResult = gl_SubgroupSize;\n";
-
- std::ostringstream src;
+ ostringstream src;
src << "#version 450\n"
<< "#extension GL_KHR_shader_subgroup_basic: enable\n"
<< " result[offset].z = gl_NumSubgroups;" // save the number of subgroups dispatched.
<< "}\n";
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ programCollection.glslSources.add("comp") << glu::ComputeSource(src.str()) << ShaderBuildOptions(programCollection.usedVulkanVersion, SPIRV_VERSION_1_3, 0u);
}
void supportedCheck (Context& context)
if (!subgroups::isSubgroupSupported(context))
TCU_THROW(NotSupportedError, "Subgroup operations are not supported");
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- {
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroups_size_control extension");
- }
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
}
void supportedCheckFeatures (Context& context, CaseDefinition caseDef)
if (caseDef.shaderStage == VK_SHADER_STAGE_ALL_GRAPHICS)
{
- VkPhysicalDeviceFeatures features;
- context.getInstanceInterface().getPhysicalDeviceFeatures(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceFeatures& features = context.getDeviceFeatures();
+
if (!features.tessellationShader || !features.geometryShader)
TCU_THROW(NotSupportedError, "Device does not support tessellation or geometry shaders");
}
if (caseDef.requiredSubgroupSizeMode != REQUIRED_SUBGROUP_SIZE_NONE ||
caseDef.pipelineShaderStageCreateFlags == VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT)
{
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
-
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (caseDef.requiredSubgroupSizeMode != REQUIRED_SUBGROUP_SIZE_NONE)
{
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Device does not support setting required subgroup size for the stages selected");
if (caseDef.pipelineShaderStageCreateFlags == VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT)
{
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
-
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
{
supportedCheckFeatures(context, caseDef);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- struct internalDataStruct internalData =
+ const VkFormat format = VK_FORMAT_R32_UINT;
+ const deUint32& flags = caseDef.pipelineShaderStageCreateFlags;
+ const struct internalDataStruct internalData =
{
&context,
caseDef,
0u,
};
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.pipelineShaderStageCreateFlags, 0u);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.pipelineShaderStageCreateFlags, 0u);
- else if ((VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) & caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.shaderStage, caseDef.pipelineShaderStageCreateFlags, 0u);
- else if (VK_SHADER_STAGE_FRAGMENT_BIT == caseDef.shaderStage)
- return subgroups::makeFragmentFrameBufferTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkFragmentPipelineStages, caseDef.pipelineShaderStageCreateFlags, 0u);
-
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkVertexPipelineStages, flags, 0u);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkVertexPipelineStages, flags, 0u);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.shaderStage, flags, 0u);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.shaderStage, flags, 0u);
+ case VK_SHADER_STAGE_FRAGMENT_BIT: return subgroups::makeFragmentFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkFragmentPipelineStages, flags, 0u);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-tcu::TestStatus test (Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ if (isAllComputeStages(caseDef.shaderStage))
{
- const deUint32 numWorkgroups[3] = {1, 1, 1};
- deUint32 subgroupSize = vkt::subgroups::getSubgroupSize(context);
-
- VkPhysicalDeviceProperties physicalDeviceProperties;
- context.getInstanceInterface().getPhysicalDeviceProperties(context.getPhysicalDevice(), &physicalDeviceProperties);
- deUint32 localSizeX, localSizeY, localSizeZ;
+ const deUint32 numWorkgroups[3] = {1, 1, 1};
+ const deUint32 subgroupSize = subgroups::getSubgroupSize(context);
+ const VkPhysicalDeviceProperties physicalDeviceProperties = context.getDeviceProperties();
// Calculate the local workgroup sizes to exercise the maximum supported by the driver
- getLocalSizes(physicalDeviceProperties, physicalDeviceProperties.limits.maxComputeWorkGroupInvocations, localSizeX, localSizeY, localSizeZ);
-
- const deUint32 localSizesToTestCount = 16;
- deUint32 localSizesToTest[localSizesToTestCount][3] =
+ const UVec3 localSize = getLocalSizes(physicalDeviceProperties, physicalDeviceProperties.limits.maxComputeWorkGroupInvocations);
+ const deUint32 localSizesToTestCount = 16;
+ const deUint32 localSizesToTest[localSizesToTestCount][3] =
{
{1, 1, 1},
{32, 4, 1},
{128, 1, 1},
{1, 128, 1},
{1, 1, 64},
- {localSizeX, localSizeY, localSizeZ},
+ {localSize.x(), localSize.y(), localSize.z()},
{1, 1, 1} // Isn't used, just here to make double buffering checks easier
};
-
- struct internalDataStruct internalData =
+ const struct internalDataStruct internalData =
{
&context,
caseDef,
subgroupSize,
};
- return subgroups::makeComputeTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkCompute,
- caseDef.pipelineShaderStageCreateFlags, numWorkgroups, DE_FALSE, subgroupSize,
- localSizesToTest, localSizesToTestCount);
- }
- else
- {
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if ( VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ return subgroups::makeComputeTestRequiredSubgroupSize(context,
+ VK_FORMAT_R32_UINT,
+ DE_NULL,
+ 0,
+ &internalData,
+ checkCompute,
+ caseDef.pipelineShaderStageCreateFlags,
+ numWorkgroups,
+ DE_FALSE,
+ subgroupSize,
+ localSizesToTest,
+ localSizesToTestCount);
+ }
+ else if (isAllGraphicsStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ struct internalDataStruct internalData =
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
+ &context,
+ caseDef,
+ 0u,
+ };
- struct internalDataStruct internalData =
+ return subgroups::allStagesRequiredSubgroupSize(context,
+ VK_FORMAT_R32_UINT,
+ DE_NULL,
+ 0,
+ &internalData,
+ checkVertexPipelineStages,
+ stages,
+ caseDef.pipelineShaderStageCreateFlags,
+ caseDef.pipelineShaderStageCreateFlags,
+ caseDef.pipelineShaderStageCreateFlags,
+ caseDef.pipelineShaderStageCreateFlags,
+ caseDef.pipelineShaderStageCreateFlags,
+ DE_NULL);
+ }
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const vector<deUint32> flags (6, caseDef.pipelineShaderStageCreateFlags);
+ const struct internalDataStruct internalData =
{
&context,
caseDef,
0u,
};
- return subgroups::allStagesRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkVertexPipelineStages, stages,
- caseDef.pipelineShaderStageCreateFlags, caseDef.pipelineShaderStageCreateFlags, caseDef.pipelineShaderStageCreateFlags,
- caseDef.pipelineShaderStageCreateFlags, caseDef.pipelineShaderStageCreateFlags, DE_NULL);
+ return subgroups::allRayTracingStagesRequiredSubgroupSize(context,
+ VK_FORMAT_R32_UINT,
+ DE_NULL,
+ 0,
+ &internalData,
+ checkVertexPipelineStages,
+ stages,
+ flags.data(),
+ DE_NULL);
}
- return tcu::TestStatus::pass("OK");
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
-tcu::TestStatus testRequireFullSubgroups (Context& context, const CaseDefinition caseDef)
+TestStatus testRequireFullSubgroups (Context& context, const CaseDefinition caseDef)
{
DE_ASSERT(VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage);
DE_ASSERT(caseDef.requiredSubgroupSizeMode == REQUIRED_SUBGROUP_SIZE_NONE);
- const deUint32 numWorkgroups[3] = {1, 1, 1};
-
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkPhysicalDeviceProperties physicalDeviceProperties;
- context.getInstanceInterface().getPhysicalDeviceProperties(context.getPhysicalDevice(), &physicalDeviceProperties);
-
- deUint32 localSizeX, localSizeY, localSizeZ;
+ const deUint32 numWorkgroups[3] = {1, 1, 1};
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ const VkPhysicalDeviceProperties& physicalDeviceProperties = context.getDeviceProperties();
// Calculate the local workgroup sizes to exercise the maximum supported by the driver
- getLocalSizes(physicalDeviceProperties, physicalDeviceProperties.limits.maxComputeWorkGroupInvocations, localSizeX, localSizeY, localSizeZ);
-
- const deUint32 subgroupSize = vkt::subgroups::getSubgroupSize(context);
-
+ const UVec3 localSize = getLocalSizes(physicalDeviceProperties, physicalDeviceProperties.limits.maxComputeWorkGroupInvocations);
+ const deUint32 subgroupSize = subgroups::getSubgroupSize(context);
// For full subgroups and allow varying subgroup size, localsize X must be a multiple of maxSubgroupSize.
// We set local size X for this test to the maximum, regardless if allow varying subgroup size is enabled or not.
- const deUint32 localSizesToTestCount = 7;
- deUint32 localSizesToTest[localSizesToTestCount][3] =
+ const deUint32 localSizesToTestCount = 7;
+ const deUint32 localSizesToTest[localSizesToTestCount][3] =
{
{subgroupSizeControlProperties.maxSubgroupSize, 1, 1},
{subgroupSizeControlProperties.maxSubgroupSize, 4, 1},
{subgroupSizeControlProperties.maxSubgroupSize, 1, 4},
{subgroupSizeControlProperties.maxSubgroupSize * 2, 1, 2},
{subgroupSizeControlProperties.maxSubgroupSize * 4, 1, 1},
- {localSizeX, localSizeY, localSizeZ},
+ {localSize.x(), localSize.y(), localSize.z()},
{1, 1, 1} // Isn't used, just here to make double buffering checks easier
};
-
- struct internalDataStruct internalData =
+ const struct internalDataStruct internalData =
{
&context,
caseDef,
subgroupSize,
};
- return subgroups::makeComputeTestRequiredSubgroupSize(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, &internalData, checkComputeRequireFull,
- caseDef.pipelineShaderStageCreateFlags, numWorkgroups, DE_FALSE, subgroupSize,
- localSizesToTest, localSizesToTestCount);
+ return subgroups::makeComputeTestRequiredSubgroupSize(context,
+ VK_FORMAT_R32G32B32A32_UINT,
+ DE_NULL,
+ 0,
+ &internalData,
+ checkComputeRequireFull,
+ caseDef.pipelineShaderStageCreateFlags,
+ numWorkgroups,
+ DE_FALSE,
+ subgroupSize,
+ localSizesToTest,
+ localSizesToTestCount);
}
-tcu::TestStatus testRequireSubgroupSize (Context& context, const CaseDefinition caseDef)
+TestStatus testRequireSubgroupSize (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
- {
- const deUint32 numWorkgroups[3] = {1, 1, 1};
-
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties2;
- properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties2.pNext = &subgroupSizeControlProperties;
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties2);
-
- VkPhysicalDeviceProperties physicalDeviceProperties;
- context.getInstanceInterface().getPhysicalDeviceProperties(context.getPhysicalDevice(), &physicalDeviceProperties);
-
- deUint32 requiredSubgroupSize = getRequiredSubgroupSizeFromMode(context, caseDef, subgroupSizeControlProperties);
-
- const deUint64 maxSubgroupLimitSize = (deUint64)requiredSubgroupSize * subgroupSizeControlProperties.maxComputeWorkgroupSubgroups;
- const deUint32 maxTotalLocalSize = (deUint32)std::min<deUint64>(maxSubgroupLimitSize, physicalDeviceProperties.limits.maxComputeWorkGroupInvocations);
- deUint32 localSizeX, localSizeY, localSizeZ;
- getLocalSizes(physicalDeviceProperties, maxTotalLocalSize, localSizeX, localSizeY, localSizeZ);
-
- const deUint32 localSizesToTestCount = 5;
- deUint32 localSizesToTest[localSizesToTestCount][3] =
+ if (isAllComputeStages(caseDef.shaderStage))
+ {
+ const deUint32 numWorkgroups[3] = {1, 1, 1};
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ const VkPhysicalDeviceProperties& physicalDeviceProperties = context.getDeviceProperties();
+ const deUint32 requiredSubgroupSize = getRequiredSubgroupSizeFromMode(context, caseDef, subgroupSizeControlProperties);
+ const deUint64 maxSubgroupLimitSize = (deUint64)requiredSubgroupSize * subgroupSizeControlProperties.maxComputeWorkgroupSubgroups;
+ const deUint32 maxTotalLocalSize = (deUint32)min<deUint64>(maxSubgroupLimitSize, physicalDeviceProperties.limits.maxComputeWorkGroupInvocations);
+ const UVec3 localSize = getLocalSizes(physicalDeviceProperties, maxTotalLocalSize);
+ const deUint32 localSizesToTestCount = 5;
+ const deUint32 localSizesToTest[localSizesToTestCount][3] =
{
{requiredSubgroupSize, 1, 1},
{1, requiredSubgroupSize, 1},
{1, 1, requiredSubgroupSize},
- {localSizeX, localSizeY, localSizeZ},
+ {localSize.x(), localSize.y(), localSize.z()},
{1, 1, 1} // Isn't used, just here to make double buffering checks easier
};
-
- struct internalDataStruct internalData =
+ struct internalDataStruct internalData =
{
- &context,
- caseDef,
- requiredSubgroupSize,
+ &context, // const Context* context;
+ caseDef, // struct CaseDefinition caseDef;
+ requiredSubgroupSize, // deUint32 requiredSubgroupSize;
};
// Depending on the flag we need to run one verification function or another.
- return subgroups::makeComputeTestRequiredSubgroupSize(context, VK_FORMAT_R32G32B32A32_UINT, DE_NULL, 0, &internalData,
+ return subgroups::makeComputeTestRequiredSubgroupSize(context,
+ VK_FORMAT_R32G32B32A32_UINT,
+ DE_NULL,
+ 0,
+ &internalData,
caseDef.pipelineShaderStageCreateFlags == VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT ? checkComputeRequireFull : checkCompute,
- caseDef.pipelineShaderStageCreateFlags, numWorkgroups, DE_TRUE, requiredSubgroupSize,
- localSizesToTest, localSizesToTestCount);
- }
- else
- {
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = &subgroupSizeControlProperties;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if ( VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ caseDef.pipelineShaderStageCreateFlags,
+ numWorkgroups,
+ DE_TRUE,
+ requiredSubgroupSize,
+ localSizesToTest,
+ localSizesToTestCount);
+ }
+ else if (isAllGraphicsStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ const deUint32 requiredSubgroupSize = getRequiredSubgroupSizeFromMode(context, caseDef, subgroupSizeControlProperties);
+ const deUint32 requiredSubgroupSizes[5] = { requiredSubgroupSize, requiredSubgroupSize, requiredSubgroupSize, requiredSubgroupSize, requiredSubgroupSize};
+ const struct internalDataStruct internalData =
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
+ &context, // const Context* context;
+ caseDef, // struct CaseDefinition caseDef;
+ requiredSubgroupSize, // deUint32 requiredSubgroupSize;
+ };
- deUint32 requiredSubgroupSize = getRequiredSubgroupSizeFromMode(context, caseDef, subgroupSizeControlProperties);
- const deUint32 requiredSubgroupSizes[5] = { requiredSubgroupSize, requiredSubgroupSize, requiredSubgroupSize, requiredSubgroupSize, requiredSubgroupSize};
- struct internalDataStruct internalData =
+ return subgroups::allStagesRequiredSubgroupSize(context,
+ VK_FORMAT_R32_UINT,
+ DE_NULL,
+ 0,
+ &internalData,
+ checkVertexPipelineStages,
+ stages,
+ caseDef.pipelineShaderStageCreateFlags,
+ caseDef.pipelineShaderStageCreateFlags,
+ caseDef.pipelineShaderStageCreateFlags,
+ caseDef.pipelineShaderStageCreateFlags,
+ caseDef.pipelineShaderStageCreateFlags,
+ requiredSubgroupSizes);
+ }
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ const deUint32 requiredSubgroupSize = getRequiredSubgroupSizeFromMode(context, caseDef, subgroupSizeControlProperties);
+ const vector<deUint32> flags (6, caseDef.pipelineShaderStageCreateFlags);
+ const vector<deUint32> requiredSubgroupSizes (6, requiredSubgroupSize);
+ const struct internalDataStruct internalData =
{
- &context,
- caseDef,
- requiredSubgroupSize,
+ &context, // const Context* context;
+ caseDef, // struct CaseDefinition caseDef;
+ requiredSubgroupSize, // deUint32 requiredSubgroupSize;
};
- return subgroups::allStagesRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkVertexPipelineStages, stages,
- caseDef.pipelineShaderStageCreateFlags, caseDef.pipelineShaderStageCreateFlags, caseDef.pipelineShaderStageCreateFlags,
- caseDef.pipelineShaderStageCreateFlags, caseDef.pipelineShaderStageCreateFlags, requiredSubgroupSizes);
+
+ return subgroups::allRayTracingStagesRequiredSubgroupSize(context,
+ VK_FORMAT_R32_UINT,
+ DE_NULL,
+ 0,
+ &internalData,
+ checkVertexPipelineStages,
+ stages,
+ flags.data(),
+ requiredSubgroupSizes.data());
}
- return tcu::TestStatus::pass("OK");
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
-tcu::TestStatus noSSBOtestRequireSubgroupSize (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtestRequireSubgroupSize (Context& context, const CaseDefinition caseDef)
{
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = &subgroupSizeControlProperties;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- deUint32 requiredSubgroupSize = getRequiredSubgroupSizeFromMode(context, caseDef, subgroupSizeControlProperties);
- struct internalDataStruct internalData =
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ const deUint32 requiredSubgroupSize = getRequiredSubgroupSizeFromMode(context, caseDef, subgroupSizeControlProperties);
+ const VkFormat format = VK_FORMAT_R32_UINT;
+ const deUint32& flags = caseDef.pipelineShaderStageCreateFlags;
+ const deUint32& size = requiredSubgroupSize;
+ struct internalDataStruct internalData =
{
&context,
caseDef,
requiredSubgroupSize,
};
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.pipelineShaderStageCreateFlags, requiredSubgroupSize);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.pipelineShaderStageCreateFlags, requiredSubgroupSize);
- else if ((VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) & caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.shaderStage, caseDef.pipelineShaderStageCreateFlags, requiredSubgroupSize);
- else if (VK_SHADER_STAGE_FRAGMENT_BIT & caseDef.shaderStage)
- return subgroups::makeFragmentFrameBufferTestRequiredSubgroupSize(context, VK_FORMAT_R32_UINT, DE_NULL, 0, &internalData, checkFragmentPipelineStages, caseDef.pipelineShaderStageCreateFlags, requiredSubgroupSize);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkVertexPipelineStages, flags, size);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkVertexPipelineStages, flags, size);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.shaderStage, flags, size);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkVertexPipelineStages, caseDef.shaderStage, flags, size);
+ case VK_SHADER_STAGE_FRAGMENT_BIT: return subgroups::makeFragmentFrameBufferTestRequiredSubgroupSize(context, format, DE_NULL, 0, &internalData, checkFragmentPipelineStages, flags, size);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-tcu::TestStatus testSanitySubgroupSizeProperties (Context& context)
+TestStatus testSanitySubgroupSizeProperties (Context& context)
{
VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
if (subgroupProperties.subgroupSize > subgroupSizeControlProperties.maxSubgroupSize ||
subgroupProperties.subgroupSize < subgroupSizeControlProperties.minSubgroupSize)
{
- std::ostringstream error;
+ ostringstream error;
error << "subgroupSize (" << subgroupProperties.subgroupSize << ") is not between maxSubgroupSize (";
error << subgroupSizeControlProperties.maxSubgroupSize << ") and minSubgroupSize (";
error << subgroupSizeControlProperties.minSubgroupSize << ")";
- return tcu::TestStatus::fail(error.str().c_str());
+
+ return TestStatus::fail(error.str().c_str());
}
- return tcu::TestStatus::pass("OK");
+ return TestStatus::pass("OK");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsSizeControlTests (tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsSizeControlTests (TestContext& testCtx)
{
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "size_control", "VK_EXT_subgroup_size_control tests"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup size control category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup size control category tests: compute"));
+ de::MovePtr<TestCaseGroup> graphicsGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup size control category tests: graphics"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup size control category tests: ray tracing"));
+ de::MovePtr<TestCaseGroup> genericGroup (new TestCaseGroup(testCtx, "generic", "Subgroup size control category tests: generic"));
+ const VkShaderStageFlags stages[] =
{
VK_SHADER_STAGE_VERTEX_BIT,
VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
VK_SHADER_STAGE_FRAGMENT_BIT,
};
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "size_control", "VK_EXT_subgroup_size_control tests"));
-
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup size control category tests: framebuffer"));
-
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup size control category tests: compute"));
-
- de::MovePtr<tcu::TestCaseGroup> graphicsGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup size control category tests: graphics"));
-
- de::MovePtr<tcu::TestCaseGroup> genericGroup(new tcu::TestCaseGroup(
- testCtx, "generic", "Subgroup size control category tests: generic"));
-
// Test sanity of the subgroup size properties.
{
addFunctionCase(genericGroup.get(), "subgroup_size_properties", "", supportedCheck, testSanitySubgroupSizeProperties);
addFunctionCaseWithPrograms(computeGroup.get(), "allow_varying_subgroup_size", "", supportedCheckFeatures, initPrograms, test, caseDefCompute);
const CaseDefinition caseDefAllGraphics = {VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT, VK_SHADER_STAGE_ALL_GRAPHICS, DE_FALSE, REQUIRED_SUBGROUP_SIZE_NONE};
addFunctionCaseWithPrograms(graphicsGroup.get(), "allow_varying_subgroup_size", "", supportedCheckFeaturesShader, initPrograms, test, caseDefAllGraphics);
+ const CaseDefinition caseDefAllRaytracing = {VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT, SHADER_STAGE_ALL_RAY_TRACING, DE_FALSE, REQUIRED_SUBGROUP_SIZE_NONE};
+ addFunctionCaseWithPrograms(raytracingGroup.get(), "allow_varying_subgroup_size", "", supportedCheckFeaturesShader, initPrograms, test, caseDefAllRaytracing);
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
addFunctionCaseWithPrograms(graphicsGroup.get(), "required_subgroup_size_max", "", supportedCheckFeaturesShader, initPrograms, testRequireSubgroupSize, caseDefAllGraphicsMax);
const CaseDefinition caseDefComputeMax = {0u, VK_SHADER_STAGE_COMPUTE_BIT, DE_FALSE, REQUIRED_SUBGROUP_SIZE_MAX};
addFunctionCaseWithPrograms(computeGroup.get(), "required_subgroup_size_max", "", supportedCheckFeatures, initPrograms, testRequireSubgroupSize, caseDefComputeMax);
+ const CaseDefinition caseDefAllRaytracingMax = {0u, SHADER_STAGE_ALL_RAY_TRACING, DE_FALSE, REQUIRED_SUBGROUP_SIZE_MAX};
+ addFunctionCaseWithPrograms(raytracingGroup.get(), "required_subgroup_size_max", "", supportedCheckFeaturesShader, initPrograms, testRequireSubgroupSize, caseDefAllRaytracingMax);
const CaseDefinition caseDefAllGraphicsMin = {0u, VK_SHADER_STAGE_ALL_GRAPHICS, DE_FALSE, REQUIRED_SUBGROUP_SIZE_MIN};
addFunctionCaseWithPrograms(graphicsGroup.get(), "required_subgroup_size_min", "", supportedCheckFeaturesShader, initPrograms, testRequireSubgroupSize, caseDefAllGraphicsMin);
const CaseDefinition caseDefComputeMin = {0u, VK_SHADER_STAGE_COMPUTE_BIT, DE_FALSE, REQUIRED_SUBGROUP_SIZE_MIN};
addFunctionCaseWithPrograms(computeGroup.get(), "required_subgroup_size_min", "", supportedCheckFeatures, initPrograms, testRequireSubgroupSize, caseDefComputeMin);
+ const CaseDefinition caseDefAllRaytracingMin = {0u, SHADER_STAGE_ALL_RAY_TRACING, DE_FALSE, REQUIRED_SUBGROUP_SIZE_MIN};
+ addFunctionCaseWithPrograms(raytracingGroup.get(), "required_subgroup_size_min", "", supportedCheckFeaturesShader, initPrograms, testRequireSubgroupSize, caseDefAllRaytracingMin);
for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
const CaseDefinition caseDefStageMax = {0u, stages[stageIndex], DE_FALSE, REQUIRED_SUBGROUP_SIZE_MAX};
group->addChild(graphicsGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
+ group->addChild(raytracingGroup.release());
return group.release();
}
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsSizeControlTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsSizeControlTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
namespace subgroups
{
-tcu::TestCaseGroup* createTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
*/ /*--------------------------------------------------------------------*/
#include "vktSubgroupsTestsUtils.hpp"
+#include "vkRayTracingUtil.hpp"
#include "deFloat16.h"
#include "deRandom.hpp"
#include "tcuCommandLine.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
+
using namespace tcu;
using namespace std;
using namespace vk;
}
}
-deUint32 getFormatSizeInBytes(const VkFormat format)
+deUint32 getFormatSizeInBytes (const VkFormat format)
{
switch (format)
{
}
}
-deUint32 getElementSizeInBytes(
- const VkFormat format,
- const subgroups::SSBOData::InputDataLayoutType layout)
+deUint32 getElementSizeInBytes (const VkFormat format,
+ const subgroups::SSBOData::InputDataLayoutType layout)
{
- deUint32 bytes = getFormatSizeInBytes(format);
+ const deUint32 bytes = getFormatSizeInBytes(format);
+
if (layout == subgroups::SSBOData::LayoutStd140)
return bytes < 16 ? 16 : bytes;
else
return bytes;
}
-Move<VkRenderPass> makeRenderPass(Context& context, VkFormat format)
+Move<VkRenderPass> makeRenderPass (Context& context, VkFormat format)
{
- VkAttachmentReference colorReference = {
- 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
+ const VkAttachmentReference colorReference =
+ {
+ 0,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL
};
-
- const VkSubpassDescription subpassDescription = {0u,
- VK_PIPELINE_BIND_POINT_GRAPHICS, 0, DE_NULL, 1, &colorReference,
- DE_NULL, DE_NULL, 0, DE_NULL
- };
-
- const VkSubpassDependency subpassDependencies[2] = {
- { VK_SUBPASS_EXTERNAL, 0u, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
- VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
- VK_ACCESS_MEMORY_READ_BIT, VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
- VK_DEPENDENCY_BY_REGION_BIT
+ const VkSubpassDescription subpassDescription =
+ {
+ 0u, // VkSubpassDescriptionFlags flags;
+ VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
+ 0, // deUint32 inputAttachmentCount;
+ DE_NULL, // const VkAttachmentReference* pInputAttachments;
+ 1, // deUint32 colorAttachmentCount;
+ &colorReference, // const VkAttachmentReference* pColorAttachments;
+ DE_NULL, // const VkAttachmentReference* pResolveAttachments;
+ DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
+ 0, // deUint32 preserveAttachmentCount;
+ DE_NULL // const deUint32* pPreserveAttachments;
+ };
+ const VkSubpassDependency subpassDependencies[2] =
+ {
+ {
+ VK_SUBPASS_EXTERNAL, // deUint32 srcSubpass;
+ 0u, // deUint32 dstSubpass;
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, // VkPipelineStageFlags srcStageMask;
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VkPipelineStageFlags dstStageMask;
+ VK_ACCESS_MEMORY_READ_BIT, // VkAccessFlags srcAccessMask;
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
+ VK_DEPENDENCY_BY_REGION_BIT // VkDependencyFlags dependencyFlags;
},
- { 0u, VK_SUBPASS_EXTERNAL, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
- VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
- VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
- VK_ACCESS_MEMORY_READ_BIT, VK_DEPENDENCY_BY_REGION_BIT
+ {
+ 0u, // deUint32 srcSubpass;
+ VK_SUBPASS_EXTERNAL, // deUint32 dstSubpass;
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, // VkPipelineStageFlags srcStageMask;
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, // VkPipelineStageFlags dstStageMask;
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
+ VK_ACCESS_MEMORY_READ_BIT, // VkAccessFlags dstAccessMask;
+ VK_DEPENDENCY_BY_REGION_BIT // VkDependencyFlags dependencyFlags;
},
};
-
- VkAttachmentDescription attachmentDescription = {0u, format,
- VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR,
- VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_DONT_CARE,
- VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
- VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL
- };
-
- const VkRenderPassCreateInfo renderPassCreateInfo = {
- VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, DE_NULL, 0u, 1,
- &attachmentDescription, 1, &subpassDescription, 2, subpassDependencies
+ const VkAttachmentDescription attachmentDescription =
+ {
+ 0u, // VkAttachmentDescriptionFlags flags;
+ format, // VkFormat format;
+ VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
+ VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
+ VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
+ VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
+ VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL // VkImageLayout finalLayout;
+ };
+ const VkRenderPassCreateInfo renderPassCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkRenderPassCreateFlags flags;
+ 1, // deUint32 attachmentCount;
+ &attachmentDescription, // const VkAttachmentDescription* pAttachments;
+ 1, // deUint32 subpassCount;
+ &subpassDescription, // const VkSubpassDescription* pSubpasses;
+ 2, // deUint32 dependencyCount;
+ subpassDependencies // const VkSubpassDependency* pDependencies;
};
- return createRenderPass(context.getDeviceInterface(), context.getDevice(),
- &renderPassCreateInfo);
+ return createRenderPass(context.getDeviceInterface(), context.getDevice(), &renderPassCreateInfo);
}
-Move<VkPipeline> makeGraphicsPipeline(const DeviceInterface& vk,
- const VkDevice device,
- const VkPipelineLayout pipelineLayout,
- const VkShaderModule vertexShaderModule,
- const VkShaderModule tessellationControlShaderModule,
- const VkShaderModule tessellationEvalShaderModule,
- const VkShaderModule geometryShaderModule,
- const VkShaderModule fragmentShaderModule,
- const VkRenderPass renderPass,
- const std::vector<VkViewport>& viewports,
- const std::vector<VkRect2D>& scissors,
- const VkPrimitiveTopology topology,
- const deUint32 subpass,
- const deUint32 patchControlPoints,
- const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo,
- const VkPipelineRasterizationStateCreateInfo* rasterizationStateCreateInfo,
- const VkPipelineMultisampleStateCreateInfo* multisampleStateCreateInfo,
- const VkPipelineDepthStencilStateCreateInfo* depthStencilStateCreateInfo,
- const VkPipelineColorBlendStateCreateInfo* colorBlendStateCreateInfo,
- const VkPipelineDynamicStateCreateInfo* dynamicStateCreateInfo,
- const deUint32 vertexShaderStageCreateFlags,
- const deUint32 tessellationControlShaderStageCreateFlags,
- const deUint32 tessellationEvalShaderStageCreateFlags,
- const deUint32 geometryShaderStageCreateFlags,
- const deUint32 fragmentShaderStageCreateFlags,
- const deUint32 requiredSubgroupSize[5])
+Move<VkPipeline> makeGraphicsPipeline (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkPipelineLayout pipelineLayout,
+ const VkShaderModule vertexShaderModule,
+ const VkShaderModule tessellationControlShaderModule,
+ const VkShaderModule tessellationEvalShaderModule,
+ const VkShaderModule geometryShaderModule,
+ const VkShaderModule fragmentShaderModule,
+ const VkRenderPass renderPass,
+ const std::vector<VkViewport>& viewports,
+ const std::vector<VkRect2D>& scissors,
+ const VkPrimitiveTopology topology,
+ const deUint32 subpass,
+ const deUint32 patchControlPoints,
+ const VkPipelineVertexInputStateCreateInfo* vertexInputStateCreateInfo,
+ const VkPipelineRasterizationStateCreateInfo* rasterizationStateCreateInfo,
+ const VkPipelineMultisampleStateCreateInfo* multisampleStateCreateInfo,
+ const VkPipelineDepthStencilStateCreateInfo* depthStencilStateCreateInfo,
+ const VkPipelineColorBlendStateCreateInfo* colorBlendStateCreateInfo,
+ const VkPipelineDynamicStateCreateInfo* dynamicStateCreateInfo,
+ const deUint32 vertexShaderStageCreateFlags,
+ const deUint32 tessellationControlShaderStageCreateFlags,
+ const deUint32 tessellationEvalShaderStageCreateFlags,
+ const deUint32 geometryShaderStageCreateFlags,
+ const deUint32 fragmentShaderStageCreateFlags,
+ const deUint32 requiredSubgroupSize[5])
{
const VkBool32 disableRasterization = (fragmentShaderModule == DE_NULL);
const bool hasTessellation = (tessellationControlShaderModule != DE_NULL || tessellationEvalShaderModule != DE_NULL);
std::vector<VkPipelineShaderStageCreateInfo> pipelineShaderStageParams;
const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT requiredSubgroupSizeCreateInfo[5] =
+ {
{
- {
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
- DE_NULL,
- requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[0] : 0u,
- },
- {
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
- DE_NULL,
- requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[1] : 0u,
- },
- {
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
- DE_NULL,
- requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[2] : 0u,
- },
- {
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
- DE_NULL,
- requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[3] : 0u,
- },
- {
- VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
- DE_NULL,
- requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[4] : 0u,
- },
- };
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[0] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[1] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[2] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[3] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[4] : 0u,
+ },
+ };
+
{
stageCreateInfo.pNext = (requiredSubgroupSizeCreateInfo[0].requiredSubgroupSize != 0u) ? &requiredSubgroupSizeCreateInfo[0] : DE_NULL;
stageCreateInfo.flags = vertexShaderStageCreateFlags;
return createGraphicsPipeline(vk, device, DE_NULL, &pipelineCreateInfo);
}
-Move<VkPipeline> makeGraphicsPipeline(Context& context,
- const VkPipelineLayout pipelineLayout,
- const VkShaderStageFlags stages,
- const VkShaderModule vertexShaderModule,
- const VkShaderModule fragmentShaderModule,
- const VkShaderModule geometryShaderModule,
- const VkShaderModule tessellationControlModule,
- const VkShaderModule tessellationEvaluationModule,
- const VkRenderPass renderPass,
- const VkPrimitiveTopology topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
- const VkVertexInputBindingDescription* vertexInputBindingDescription = DE_NULL,
- const VkVertexInputAttributeDescription* vertexInputAttributeDescriptions = DE_NULL,
- const bool frameBufferTests = false,
- const vk::VkFormat attachmentFormat = VK_FORMAT_R32G32B32A32_SFLOAT,
- const deUint32 vertexShaderStageCreateFlags = 0u,
- const deUint32 tessellationControlShaderStageCreateFlags = 0u,
- const deUint32 tessellationEvalShaderStageCreateFlags = 0u,
- const deUint32 geometryShaderStageCreateFlags = 0u,
- const deUint32 fragmentShaderStageCreateFlags = 0u,
- const deUint32 requiredSubgroupSize[5] = DE_NULL)
+Move<VkPipeline> makeGraphicsPipeline (Context& context,
+ const VkPipelineLayout pipelineLayout,
+ const VkShaderStageFlags stages,
+ const VkShaderModule vertexShaderModule,
+ const VkShaderModule fragmentShaderModule,
+ const VkShaderModule geometryShaderModule,
+ const VkShaderModule tessellationControlModule,
+ const VkShaderModule tessellationEvaluationModule,
+ const VkRenderPass renderPass,
+ const VkPrimitiveTopology topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
+ const VkVertexInputBindingDescription* vertexInputBindingDescription = DE_NULL,
+ const VkVertexInputAttributeDescription* vertexInputAttributeDescriptions = DE_NULL,
+ const bool frameBufferTests = false,
+ const vk::VkFormat attachmentFormat = VK_FORMAT_R32G32B32A32_SFLOAT,
+ const deUint32 vertexShaderStageCreateFlags = 0u,
+ const deUint32 tessellationControlShaderStageCreateFlags = 0u,
+ const deUint32 tessellationEvalShaderStageCreateFlags = 0u,
+ const deUint32 geometryShaderStageCreateFlags = 0u,
+ const deUint32 fragmentShaderStageCreateFlags = 0u,
+ const deUint32 requiredSubgroupSize[5] = DE_NULL)
{
- std::vector<VkViewport> noViewports;
- std::vector<VkRect2D> noScissors;
-
- const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo =
+ const std::vector<VkViewport> noViewports;
+ const std::vector<VkRect2D> noScissors;
+ const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
vertexInputAttributeDescriptions == DE_NULL ? 0u : 1u, // deUint32 vertexAttributeDescriptionCount;
vertexInputAttributeDescriptions, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
};
-
- const deUint32 numChannels = getNumUsedChannels(mapVkFormat(attachmentFormat).order);
- const VkColorComponentFlags colorComponent =
- numChannels == 1 ? VK_COLOR_COMPONENT_R_BIT :
- numChannels == 2 ? VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT :
- numChannels == 3 ? VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT :
- VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
-
- const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
- {
- VK_FALSE, VK_BLEND_FACTOR_ZERO, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD,
- VK_BLEND_FACTOR_ZERO, VK_BLEND_FACTOR_ZERO, VK_BLEND_OP_ADD,
- colorComponent
+ const deUint32 numChannels = getNumUsedChannels(mapVkFormat(attachmentFormat).order);
+ const VkColorComponentFlags colorComponent = numChannels == 1 ? VK_COLOR_COMPONENT_R_BIT :
+ numChannels == 2 ? VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT :
+ numChannels == 3 ? VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT :
+ VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
+ const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
+ {
+ VK_FALSE, // VkBool32 blendEnable;
+ VK_BLEND_FACTOR_ZERO, // VkBlendFactor srcColorBlendFactor;
+ VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstColorBlendFactor;
+ VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp;
+ VK_BLEND_FACTOR_ZERO, // VkBlendFactor srcAlphaBlendFactor;
+ VK_BLEND_FACTOR_ZERO, // VkBlendFactor dstAlphaBlendFactor;
+ VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp;
+ colorComponent // VkColorComponentFlags colorWriteMask;
};
-
- const VkPipelineColorBlendStateCreateInfo colorBlendStateCreateInfo =
- {
- VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, DE_NULL, 0u,
- VK_FALSE, VK_LOGIC_OP_CLEAR, 1, &colorBlendAttachmentState,
- { 0.0f, 0.0f, 0.0f, 0.0f }
+ const VkPipelineColorBlendStateCreateInfo colorBlendStateCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkPipelineColorBlendStateCreateFlags flags;
+ VK_FALSE, // VkBool32 logicOpEnable;
+ VK_LOGIC_OP_CLEAR, // VkLogicOp logicOp;
+ 1, // deUint32 attachmentCount;
+ &colorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
+ { 0.0f, 0.0f, 0.0f, 0.0f } // float blendConstants[4];
};
-
- const deUint32 patchControlPoints = (VK_SHADER_STAGE_FRAGMENT_BIT & stages && frameBufferTests) ? 2u : 1u;
+ const deUint32 patchControlPoints = (VK_SHADER_STAGE_FRAGMENT_BIT & stages && frameBufferTests) ? 2u : 1u;
return makeGraphicsPipeline(context.getDeviceInterface(), // const DeviceInterface& vk
context.getDevice(), // const VkDevice device
requiredSubgroupSize); // const deUint32 requiredSubgroupSize[5]
}
-Move<VkCommandBuffer> makeCommandBuffer(
- Context& context, const VkCommandPool commandPool)
+Move<VkCommandBuffer> makeCommandBuffer (Context& context, const VkCommandPool commandPool)
{
const VkCommandBufferAllocateInfo bufferAllocateParams =
{
struct Buffer : public BufferOrImage
{
- explicit Buffer(
- Context& context, VkDeviceSize sizeInBytes, VkBufferUsageFlags usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)
+ explicit Buffer (Context& context, VkDeviceSize sizeInBytes, VkBufferUsageFlags usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)
: BufferOrImage (false)
, m_sizeInBytes (sizeInBytes)
, m_usage (usage)
struct Image : public BufferOrImage
{
- explicit Image(Context& context, deUint32 width, deUint32 height,
- VkFormat format, VkImageUsageFlags usage = VK_IMAGE_USAGE_STORAGE_BIT)
+ explicit Image (Context& context, deUint32 width, deUint32 height, VkFormat format, VkImageUsageFlags usage = VK_IMAGE_USAGE_STORAGE_BIT)
: BufferOrImage(true)
{
const DeviceInterface& vk = context.getDeviceInterface();
const VkImageCreateInfo imageCreateInfo =
{
- VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, DE_NULL, 0, VK_IMAGE_TYPE_2D,
- format, {width, height, 1}, 1, 1, VK_SAMPLE_COUNT_1_BIT,
- VK_IMAGE_TILING_OPTIMAL, usage,
- VK_SHARING_MODE_EXCLUSIVE, 0u, DE_NULL,
- VK_IMAGE_LAYOUT_UNDEFINED
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0, // VkImageCreateFlags flags;
+ VK_IMAGE_TYPE_2D, // VkImageType imageType;
+ format, // VkFormat format;
+ {width, height, 1}, // VkExtent3D extent;
+ 1, // deUint32 mipLevels;
+ 1, // deUint32 arrayLayers;
+ VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
+ VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
+ usage, // VkImageUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 0u, // deUint32 queueFamilyIndexCount;
+ DE_NULL, // const deUint32* pQueueFamilyIndices;
+ VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
};
const VkComponentMapping componentMapping =
const VkSamplerCreateInfo samplerCreateInfo =
{
- VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
- DE_NULL,
- 0u,
- VK_FILTER_NEAREST,
- VK_FILTER_NEAREST,
- VK_SAMPLER_MIPMAP_MODE_NEAREST,
- VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
- VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
- VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
- 0.0f,
- VK_FALSE,
- 1.0f,
- DE_FALSE,
- VK_COMPARE_OP_ALWAYS,
- 0.0f,
- 0.0f,
- VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK,
- VK_FALSE,
+ VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkSamplerCreateFlags flags;
+ VK_FILTER_NEAREST, // VkFilter magFilter;
+ VK_FILTER_NEAREST, // VkFilter minFilter;
+ VK_SAMPLER_MIPMAP_MODE_NEAREST, // VkSamplerMipmapMode mipmapMode;
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // VkSamplerAddressMode addressModeU;
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // VkSamplerAddressMode addressModeV;
+ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE, // VkSamplerAddressMode addressModeW;
+ 0.0f, // float mipLodBias;
+ VK_FALSE, // VkBool32 anisotropyEnable;
+ 1.0f, // float maxAnisotropy;
+ DE_FALSE, // VkBool32 compareEnable;
+ VK_COMPARE_OP_ALWAYS, // VkCompareOp compareOp;
+ 0.0f, // float minLod;
+ 0.0f, // float maxLod;
+ VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
+ VK_FALSE, // VkBool32 unnormalizedCoordinates;
};
m_image = createImage(vk, device, &imageCreateInfo);
const VkImageViewCreateInfo imageViewCreateInfo =
{
- VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, DE_NULL, 0, *m_image,
- VK_IMAGE_VIEW_TYPE_2D, imageCreateInfo.format, componentMapping,
- subresourceRange
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0, // VkImageViewCreateFlags flags;
+ *m_image, // VkImage image;
+ VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
+ imageCreateInfo.format, // VkFormat format;
+ componentMapping, // VkComponentMapping components;
+ subresourceRange // VkImageSubresourceRange subresourceRange;
};
m_imageView = createImageView(vk, device, &imageViewCreateInfo);
}
private:
- Move<VkImage> m_image;
- Move<VkImageView> m_imageView;
- Move<VkSampler> m_sampler;
+ Move<VkImage> m_image;
+ Move<VkImageView> m_imageView;
+ Move<VkSampler> m_sampler;
};
}
-std::string vkt::subgroups::getSharedMemoryBallotHelper()
+deUint32 vkt::subgroups::getStagesCount (const VkShaderStageFlags shaderStages)
+{
+ const deUint32 stageCount = isAllRayTracingStages(shaderStages) ? 6
+ : isAllGraphicsStages(shaderStages) ? 4
+ : isAllComputeStages(shaderStages) ? 1
+ : 0;
+
+ DE_ASSERT(stageCount != 0);
+
+ return stageCount;
+}
+
+std::string vkt::subgroups::getSharedMemoryBallotHelper ()
{
return "shared uvec4 superSecretComputeShaderHelper[gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z];\n"
"uvec4 sharedMemoryBallot(bool vote)\n"
"}\n";
}
-std::string vkt::subgroups::getSharedMemoryBallotHelperARB()
+std::string vkt::subgroups::getSharedMemoryBallotHelperARB ()
{
return "shared uvec4 superSecretComputeShaderHelper[gl_WorkGroupSize.x * gl_WorkGroupSize.y * gl_WorkGroupSize.z];\n"
"uint64_t sharedMemoryBallot(bool vote)\n"
"}\n";
}
-deUint32 vkt::subgroups::getSubgroupSize(Context& context)
+deUint32 vkt::subgroups::getSubgroupSize (Context& context)
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- return subgroupProperties.subgroupSize;
+ return context.getSubgroupProperties().subgroupSize;
}
-VkDeviceSize vkt::subgroups::maxSupportedSubgroupSize() {
+deUint32 vkt::subgroups::maxSupportedSubgroupSize ()
+{
return 128u;
}
-std::string vkt::subgroups::getShaderStageName(VkShaderStageFlags stage)
+std::string vkt::subgroups::getShaderStageName (VkShaderStageFlags stage)
{
switch (stage)
{
- default:
- DE_FATAL("Unhandled stage!");
- return "";
- case VK_SHADER_STAGE_COMPUTE_BIT:
- return "compute";
- case VK_SHADER_STAGE_FRAGMENT_BIT:
- return "fragment";
- case VK_SHADER_STAGE_VERTEX_BIT:
- return "vertex";
- case VK_SHADER_STAGE_GEOMETRY_BIT:
- return "geometry";
- case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
- return "tess_control";
- case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
- return "tess_eval";
+ case VK_SHADER_STAGE_COMPUTE_BIT: return "compute";
+ case VK_SHADER_STAGE_FRAGMENT_BIT: return "fragment";
+ case VK_SHADER_STAGE_VERTEX_BIT: return "vertex";
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return "geometry";
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return "tess_control";
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return "tess_eval";
+ case VK_SHADER_STAGE_RAYGEN_BIT_KHR: return "rgen";
+ case VK_SHADER_STAGE_ANY_HIT_BIT_KHR: return "ahit";
+ case VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR: return "chit";
+ case VK_SHADER_STAGE_MISS_BIT_KHR: return "miss";
+ case VK_SHADER_STAGE_INTERSECTION_BIT_KHR: return "sect";
+ case VK_SHADER_STAGE_CALLABLE_BIT_KHR: return "call";
+ default: TCU_THROW(InternalError, "Unhandled stage");
}
}
-std::string vkt::subgroups::getSubgroupFeatureName(vk::VkSubgroupFeatureFlagBits bit)
+std::string vkt::subgroups::getSubgroupFeatureName (vk::VkSubgroupFeatureFlagBits bit)
{
switch (bit)
{
- default:
- DE_FATAL("Unknown subgroup feature category!");
- return "";
- case VK_SUBGROUP_FEATURE_BASIC_BIT:
- return "VK_SUBGROUP_FEATURE_BASIC_BIT";
- case VK_SUBGROUP_FEATURE_VOTE_BIT:
- return "VK_SUBGROUP_FEATURE_VOTE_BIT";
- case VK_SUBGROUP_FEATURE_ARITHMETIC_BIT:
- return "VK_SUBGROUP_FEATURE_ARITHMETIC_BIT";
- case VK_SUBGROUP_FEATURE_BALLOT_BIT:
- return "VK_SUBGROUP_FEATURE_BALLOT_BIT";
- case VK_SUBGROUP_FEATURE_SHUFFLE_BIT:
- return "VK_SUBGROUP_FEATURE_SHUFFLE_BIT";
- case VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT:
- return "VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT";
- case VK_SUBGROUP_FEATURE_CLUSTERED_BIT:
- return "VK_SUBGROUP_FEATURE_CLUSTERED_BIT";
- case VK_SUBGROUP_FEATURE_QUAD_BIT:
- return "VK_SUBGROUP_FEATURE_QUAD_BIT";
+ case VK_SUBGROUP_FEATURE_BASIC_BIT: return "VK_SUBGROUP_FEATURE_BASIC_BIT";
+ case VK_SUBGROUP_FEATURE_VOTE_BIT: return "VK_SUBGROUP_FEATURE_VOTE_BIT";
+ case VK_SUBGROUP_FEATURE_ARITHMETIC_BIT: return "VK_SUBGROUP_FEATURE_ARITHMETIC_BIT";
+ case VK_SUBGROUP_FEATURE_BALLOT_BIT: return "VK_SUBGROUP_FEATURE_BALLOT_BIT";
+ case VK_SUBGROUP_FEATURE_SHUFFLE_BIT: return "VK_SUBGROUP_FEATURE_SHUFFLE_BIT";
+ case VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT: return "VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT";
+ case VK_SUBGROUP_FEATURE_CLUSTERED_BIT: return "VK_SUBGROUP_FEATURE_CLUSTERED_BIT";
+ case VK_SUBGROUP_FEATURE_QUAD_BIT: return "VK_SUBGROUP_FEATURE_QUAD_BIT";
+ default: TCU_THROW(InternalError, "Unknown subgroup feature category");
}
}
}
-
-std::string vkt::subgroups::getVertShaderForStage(vk::VkShaderStageFlags stage)
+static std::string getFramebufferBufferDeclarations (const VkFormat& format,
+ const std::vector<std::string>& declarations,
+ const deUint32 stage)
{
- switch (stage)
+ if (declarations.empty())
{
- default:
- DE_FATAL("Unhandled stage!");
- return "";
- case VK_SHADER_STAGE_FRAGMENT_BIT:
- return
- "#version 450\n"
- "void main (void)\n"
- "{\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- "}\n";
- case VK_SHADER_STAGE_GEOMETRY_BIT:
- return
- "#version 450\n"
- "void main (void)\n"
- "{\n"
- "}\n";
- case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
- case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
- return
- "#version 450\n"
- "void main (void)\n"
- "{\n"
- "}\n";
+ const std::string name = (stage == 0) ? "result" : "out_color";
+ const std::string suffix = (stage == 2) ? "[]" : "";
+ const std::string result =
+ "layout(location = 0) out float " + name + suffix + ";\n"
+ "layout(set = 0, binding = 0) uniform Buffer1\n"
+ "{\n"
+ " " + de::toString(subgroups::getFormatNameForGLSL(format)) + " data[" + de::toString(subgroups::maxSupportedSubgroupSize()) + "];\n"
+ "};\n";
+
+ return result;
+ }
+ else
+ {
+ return declarations[stage];
}
}
-void vkt::subgroups::initStdFrameBufferPrograms( SourceCollections& programCollection,
- const vk::ShaderBuildOptions& buildOptions,
- VkShaderStageFlags shaderStage,
- VkFormat format,
- bool gsPointSize,
- std::string extHeader,
- std::string testSrc,
- std::string helperStr)
+void vkt::subgroups::initStdFrameBufferPrograms (SourceCollections& programCollection,
+ const vk::ShaderBuildOptions& buildOptions,
+ VkShaderStageFlags shaderStage,
+ VkFormat format,
+ bool gsPointSize,
+ const std::string& extHeader,
+ const std::string& testSrc,
+ const std::string& helperStr,
+ const std::vector<std::string>& declarations)
{
subgroups::setFragmentShaderFrameBuffer(programCollection);
if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
{
std::ostringstream vertex;
+
vertex << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extHeader.c_str()
+ << extHeader
<< "layout(location = 0) in highp vec4 in_position;\n"
- << "layout(location = 0) out float result;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
+ << getFramebufferBufferDeclarations(format, declarations, 0)
<< "\n"
- << helperStr.c_str()
+ << helperStr
<< "void main (void)\n"
<< "{\n"
<< " uint tempRes;\n"
<< " gl_Position = in_position;\n"
<< " gl_PointSize = 1.0f;\n"
<< "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex.str()) << buildOptions;
+
+ programCollection.glslSources.add("vert") << glu::VertexSource(vertex.str()) << buildOptions;
}
else if (shaderStage == VK_SHADER_STAGE_GEOMETRY_BIT)
{
std::ostringstream geometry;
geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extHeader.c_str()
+ << extHeader
<< "layout(points) in;\n"
<< "layout(points, max_vertices = 1) out;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
+ << getFramebufferBufferDeclarations(format, declarations, 1)
<< "\n"
- << helperStr.c_str()
+ << helperStr
<< "void main (void)\n"
<< "{\n"
<< " uint tempRes;\n"
<< " EndPrimitive();\n"
<< "}\n";
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
+ programCollection.glslSources.add("geometry") << glu::GeometrySource(geometry.str()) << buildOptions;
}
else if (shaderStage == VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT)
{
std::ostringstream controlSource;
+
controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extHeader.c_str()
+ << extHeader
<< "layout(vertices = 2) out;\n"
- << "layout(location = 0) out float out_color[];\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
+ << getFramebufferBufferDeclarations(format, declarations, 2)
<< "\n"
- << helperStr.c_str()
+ << helperStr
<< "void main (void)\n"
<< "{\n"
<< " if (gl_InvocationID == 0)\n"
<< testSrc
<< " out_color[gl_InvocationID] = float(tempRes);\n"
<< " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- << (gsPointSize ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "")
+ << (gsPointSize ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "")
<< "}\n";
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
+ programCollection.glslSources.add("tesc") << glu::TessellationControlSource(controlSource.str()) << buildOptions;
subgroups::setTesEvalShaderFrameBuffer(programCollection);
}
else if (shaderStage == VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
{
ostringstream evaluationSource;
+
evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extHeader.c_str()
+ << extHeader
<< "layout(isolines, equal_spacing, ccw ) in;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
+ << getFramebufferBufferDeclarations(format, declarations, 3)
<< "\n"
- << helperStr.c_str()
+ << helperStr
<< "void main (void)\n"
<< "{\n"
<< " uint tempRes;\n"
}
}
-void vkt::subgroups::initStdPrograms( vk::SourceCollections& programCollection,
- const vk::ShaderBuildOptions& buildOptions,
- vk::VkShaderStageFlags shaderStage,
- vk::VkFormat format,
- bool gsPointSize,
- std::string extHeader,
- std::string testSrc,
- std::string helperStr)
+static std::string getBufferDeclarations (vk::VkShaderStageFlags shaderStage,
+ const std::string& formatName,
+ const std::vector<std::string>& declarations,
+ const deUint32 stage)
{
- if (shaderStage == VK_SHADER_STAGE_COMPUTE_BIT)
+ if (declarations.empty())
+ {
+ const deUint32 stageCount = vkt::subgroups::getStagesCount(shaderStage);
+ const deUint32 binding0 = stage;
+ const deUint32 binding1 = stageCount;
+ const bool fragment = (shaderStage & VK_SHADER_STAGE_FRAGMENT_BIT) && (stage == stageCount);
+ const string buffer1 = fragment
+ ? "layout(location = 0) out uint result;\n"
+ : "layout(set = 0, binding = " + de::toString(binding0) + ", std430) buffer Buffer1\n"
+ "{\n"
+ " uint result[];\n"
+ "};\n";
+ //todo boza I suppose it can be "layout(set = 0, binding = " + de::toString(binding1) + ", std430) readonly buffer Buffer2\n"
+ const string buffer2 = "layout(set = 0, binding = " + de::toString(binding1) + ", std430)" + (stageCount == 1 ? "" : " readonly") + " buffer Buffer" + (fragment ? "1" : "2") + "\n"
+ "{\n"
+ " " + formatName + " data[];\n"
+ "};\n";
+
+ return buffer1 + buffer2;
+ }
+ else
+ {
+ return declarations[stage];
+ }
+}
+
+void vkt::subgroups::initStdPrograms (vk::SourceCollections& programCollection,
+ const vk::ShaderBuildOptions& buildOptions,
+ vk::VkShaderStageFlags shaderStage,
+ vk::VkFormat format,
+ bool gsPointSize,
+ const std::string& extHeader,
+ const std::string& testSrc,
+ const std::string& helperStr,
+ const std::vector<std::string>& declarations,
+ const bool avoidHelperInvocations,
+ const std::string& tempRes)
+{
+ const std::string formatName = subgroups::getFormatNameForGLSL(format);
+
+ if (isAllComputeStages(shaderStage))
{
- std::ostringstream src;
+ std::ostringstream src;
src << "#version 450\n"
- << extHeader.c_str()
+ << extHeader
<< "layout (local_size_x_id = 0, local_size_y_id = 1, "
"local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint result[];\n"
- << "};\n"
- << "layout(set = 0, binding = 1, std430) buffer Buffer2\n"
- << "{\n"
- << " " << subgroups::getFormatNameForGLSL(format) << " data[];\n"
- << "};\n"
+ << getBufferDeclarations(shaderStage, formatName, declarations, 0)
<< "\n"
- << helperStr.c_str()
+ << helperStr
<< "void main (void)\n"
<< "{\n"
<< " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
<< " highp uint offset = globalSize.x * ((globalSize.y * "
"gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
"gl_GlobalInvocationID.x;\n"
- << " uint tempRes;\n"
+ << tempRes
<< testSrc
<< " result[offset] = tempRes;\n"
<< "}\n";
programCollection.glslSources.add("comp") << glu::ComputeSource(src.str()) << buildOptions;
}
- else
+ else if (isAllGraphicsStages(shaderStage))
{
const string vertex =
"#version 450\n"
- + extHeader +
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(format) + " data[];\n"
- "};\n"
+ + extHeader
+ + getBufferDeclarations(shaderStage, formatName, declarations, 0) +
"\n"
+ helperStr +
"void main (void)\n"
"#version 450\n"
+ extHeader +
"layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(format) + " data[];\n"
- "};\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 1) +
"\n"
+ helperStr +
"void main (void)\n"
"{\n"
- " uint tempRes;\n"
+ + tempRes
+ testSrc +
" result[gl_PrimitiveID] = tempRes;\n"
" if (gl_InvocationID == 0)\n"
" gl_TessLevelOuter[1] = 1.0f;\n"
" }\n"
" gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (gsPointSize ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "") +
+ + (gsPointSize ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "") +
"}\n";
const string tese =
"#version 450\n"
+ extHeader +
"layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(format) + " data[];\n"
- "};\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 2) +
"\n"
+ helperStr +
"void main (void)\n"
"{\n"
- " uint tempRes;\n"
+ + tempRes
+ testSrc +
" result[gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5)] = tempRes;\n"
" float pixelSize = 2.0f/1024.0f;\n"
+ extHeader +
"layout(${TOPOLOGY}) in;\n"
"layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint result[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(format) + " data[];\n"
- "};\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 3) +
"\n"
+ helperStr +
"void main (void)\n"
"{\n"
- " uint tempRes;\n"
+ + tempRes
+ testSrc +
" result[gl_PrimitiveIDIn] = tempRes;\n"
" gl_Position = gl_in[0].gl_Position;\n"
const string fragment =
"#version 450\n"
- + extHeader +
- "layout(location = 0) out uint result;\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer1\n"
- "{\n"
- " " + subgroups::getFormatNameForGLSL(format) + " data[];\n"
- "};\n"
+ + extHeader
+ + getBufferDeclarations(shaderStage, formatName, declarations, 4)
+ helperStr +
"void main (void)\n"
"{\n"
- " uint tempRes;\n"
+ + (avoidHelperInvocations ? " if (gl_HelperInvocation) return;\n" : "")
+ + tempRes
+ testSrc +
" result = tempRes;\n"
"}\n";
subgroups::addGeometryShadersFromTemplate(geometry, buildOptions, programCollection.glslSources);
programCollection.glslSources.add("fragment") << glu::FragmentSource(fragment)<< buildOptions;
}
+ else if (isAllRayTracingStages(shaderStage))
+ {
+ const std::string rgenShader =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ + extHeader +
+ "layout(location = 0) rayPayloadEXT uvec4 payload;\n"
+ "layout(location = 0) callableDataEXT uvec4 callData;"
+ "layout(set = 1, binding = 0) uniform accelerationStructureEXT topLevelAS;\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 0) +
+ "\n"
+ + helperStr +
+ "void main()\n"
+ "{\n"
+ + tempRes
+ + testSrc +
+ " uint rayFlags = 0;\n"
+ " uint cullMask = 0xFF;\n"
+ " float tmin = 0.0;\n"
+ " float tmax = 9.0;\n"
+ " vec3 origin = vec3((float(gl_LaunchIDEXT.x) + 0.5f) / float(gl_LaunchSizeEXT.x), (float(gl_LaunchIDEXT.y) + 0.5f) / float(gl_LaunchSizeEXT.y), 0.0);\n"
+ " vec3 directHit = vec3(0.0, 0.0, -1.0);\n"
+ " vec3 directMiss = vec3(0.0, 0.0, +1.0);\n"
+ "\n"
+ " traceRayEXT(topLevelAS, rayFlags, cullMask, 0, 0, 0, origin, tmin, directHit, tmax, 0);\n"
+ " traceRayEXT(topLevelAS, rayFlags, cullMask, 0, 0, 0, origin, tmin, directMiss, tmax, 0);\n"
+ " executeCallableEXT(0, 0);"
+ " result[gl_LaunchIDEXT.x] = tempRes;\n"
+ "}\n";
+ const std::string ahitShader =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ + extHeader +
+ "hitAttributeEXT vec3 attribs;\n"
+ "layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 1) +
+ "\n"
+ + helperStr +
+ "void main()\n"
+ "{\n"
+ + tempRes
+ + testSrc +
+ " result[gl_LaunchIDEXT.x] = tempRes;\n"
+ "}\n";
+ const std::string chitShader =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ + extHeader +
+ "hitAttributeEXT vec3 attribs;\n"
+ "layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 2) +
+ "\n"
+ + helperStr +
+ "void main()\n"
+ "{\n"
+ + tempRes
+ + testSrc +
+ " result[gl_LaunchIDEXT.x] = tempRes;\n"
+ "}\n";
+ const std::string missShader =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ + extHeader +
+ "layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 3) +
+ "\n"
+ + helperStr +
+ "void main()\n"
+ "{\n"
+ + tempRes
+ + testSrc +
+ " result[gl_LaunchIDEXT.x] = tempRes;\n"
+ "}\n";
+ const std::string sectShader =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ + extHeader +
+ "hitAttributeEXT vec3 hitAttribute;\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 4) +
+ "\n"
+ + helperStr +
+ "void main()\n"
+ "{\n"
+ + tempRes
+ + testSrc +
+ " reportIntersectionEXT(0.75f, gl_HitKindFrontFacingTriangleEXT);\n"
+ " result[gl_LaunchIDEXT.x] = tempRes;\n"
+ "}\n";
+ const std::string callShader =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ + extHeader +
+ "layout(location = 0) callableDataInEXT float callData;\n"
+ + getBufferDeclarations(shaderStage, formatName, declarations, 5) +
+ "\n"
+ + helperStr +
+ "void main()\n"
+ "{\n"
+ + tempRes
+ + testSrc +
+ " result[gl_LaunchIDEXT.x] = tempRes;\n"
+ "}\n";
+
+ programCollection.glslSources.add("rgen") << glu::RaygenSource (rgenShader) << buildOptions;
+ programCollection.glslSources.add("ahit") << glu::AnyHitSource (ahitShader) << buildOptions;
+ programCollection.glslSources.add("chit") << glu::ClosestHitSource (chitShader) << buildOptions;
+ programCollection.glslSources.add("miss") << glu::MissSource (missShader) << buildOptions;
+ programCollection.glslSources.add("sect") << glu::IntersectionSource(sectShader) << buildOptions;
+ programCollection.glslSources.add("call") << glu::CallableSource (callShader) << buildOptions;
+
+ subgroups::addRayTracingNoSubgroupShader(programCollection);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
+
}
-bool vkt::subgroups::isSubgroupSupported(Context& context)
+bool vkt::subgroups::isSubgroupSupported (Context& context)
{
return context.contextSupports(vk::ApiVersion(1, 1, 0));
}
-bool vkt::subgroups::areSubgroupOperationsSupportedForStage(
- Context& context, const VkShaderStageFlags stage)
+bool vkt::subgroups::areSubgroupOperationsSupportedForStage (Context& context, const VkShaderStageFlags stage)
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- return (stage & subgroupProperties.supportedStages) ? true : false;
+ return (stage & (context.getSubgroupProperties().supportedStages)) ? true : false;
}
-bool vkt::subgroups::areSubgroupOperationsRequiredForStage(
- VkShaderStageFlags stage)
+bool vkt::subgroups::isSubgroupFeatureSupportedForDevice (Context& context, VkSubgroupFeatureFlagBits bit)
{
- switch (stage)
- {
- default:
- return false;
- case VK_SHADER_STAGE_COMPUTE_BIT:
- return true;
- }
-}
-
-bool vkt::subgroups::isSubgroupFeatureSupportedForDevice(
- Context& context,
- VkSubgroupFeatureFlagBits bit) {
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- return (bit & subgroupProperties.supportedOperations) ? true : false;
+ return (bit & (context.getSubgroupProperties().supportedOperations)) ? true : false;
}
-bool vkt::subgroups::isFragmentSSBOSupportedForDevice(Context& context)
+bool vkt::subgroups::isFragmentSSBOSupportedForDevice (Context& context)
{
- const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(
- context.getInstanceInterface(), context.getPhysicalDevice());
- return features.fragmentStoresAndAtomics ? true : false;
+ return context.getDeviceFeatures().fragmentStoresAndAtomics ? true : false;
}
-bool vkt::subgroups::isVertexSSBOSupportedForDevice(Context& context)
+bool vkt::subgroups::isVertexSSBOSupportedForDevice (Context& context)
{
- const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(
- context.getInstanceInterface(), context.getPhysicalDevice());
- return features.vertexPipelineStoresAndAtomics ? true : false;
+ return context.getDeviceFeatures().vertexPipelineStoresAndAtomics ? true : false;
}
-bool vkt::subgroups::isInt64SupportedForDevice(Context& context)
+bool vkt::subgroups::isInt64SupportedForDevice (Context& context)
{
- const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(
- context.getInstanceInterface(), context.getPhysicalDevice());
- return features.shaderInt64 ? true : false;
+ return context.getDeviceFeatures().shaderInt64 ? true : false;
}
bool vkt::subgroups::isTessellationAndGeometryPointSizeSupported (Context& context)
{
- const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(
- context.getInstanceInterface(), context.getPhysicalDevice());
- return features.shaderTessellationAndGeometryPointSize ? true : false;
+ return context.getDeviceFeatures().shaderTessellationAndGeometryPointSize ? true : false;
}
-bool vkt::subgroups::is16BitUBOStorageSupported(Context& context) {
- VkPhysicalDevice16BitStorageFeatures storage16bit;
- deMemset(&storage16bit, 0, sizeof(storage16bit));
- storage16bit.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR;
- storage16bit.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features2;
- deMemset(&features2, 0, sizeof(features2));
- features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features2.pNext = &storage16bit;
-
- const PlatformInterface& platformInterface = context.getPlatformInterface();
- const VkInstance instance = context.getInstance();
- const InstanceDriver instanceDriver(platformInterface, instance);
-
- instanceDriver.getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
- return bool(storage16bit.uniformAndStorageBuffer16BitAccess);
+bool vkt::subgroups::is16BitUBOStorageSupported (Context& context)
+{
+ return context.get16BitStorageFeatures().uniformAndStorageBuffer16BitAccess ? true : false;
}
-
-bool vkt::subgroups::is8BitUBOStorageSupported(Context& context) {
-
- VkPhysicalDevice8BitStorageFeatures storage8bit;
- deMemset(&storage8bit, 0, sizeof(storage8bit));
- storage8bit.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR;
- storage8bit.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features2;
- deMemset(&features2, 0, sizeof(features2));
- features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features2.pNext = &storage8bit;
-
-
- const PlatformInterface& platformInterface = context.getPlatformInterface();
- const VkInstance instance = context.getInstance();
- const InstanceDriver instanceDriver(platformInterface, instance);
-
- instanceDriver.getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
- return bool(storage8bit.uniformAndStorageBuffer8BitAccess);
+bool vkt::subgroups::is8BitUBOStorageSupported (Context& context)
+{
+ return context.get8BitStorageFeatures().uniformAndStorageBuffer8BitAccess ? true : false;
}
-bool vkt::subgroups::isFormatSupportedForDevice(Context& context, vk::VkFormat format)
+bool vkt::subgroups::isFormatSupportedForDevice (Context& context, vk::VkFormat format)
{
- VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures subgroupExtendedTypesFeatures;
- deMemset(&subgroupExtendedTypesFeatures, 0, sizeof(subgroupExtendedTypesFeatures));
- subgroupExtendedTypesFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_SUBGROUP_EXTENDED_TYPES_FEATURES;
- subgroupExtendedTypesFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceShaderFloat16Int8Features float16Int8Features;
- deMemset(&float16Int8Features, 0, sizeof(float16Int8Features));
- float16Int8Features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_FLOAT16_INT8_FEATURES;
- float16Int8Features.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features2;
- deMemset(&features2, 0, sizeof(features2));
- features2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features2.pNext = DE_NULL;
-
- VkPhysicalDevice16BitStorageFeatures storage16bit;
- deMemset(&storage16bit, 0, sizeof(storage16bit));
- storage16bit.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES_KHR;
- storage16bit.pNext = DE_NULL;
- bool is16bitStorageSupported = context.isDeviceFunctionalitySupported("VK_KHR_16bit_storage");
-
- VkPhysicalDevice8BitStorageFeatures storage8bit;
- deMemset(&storage8bit, 0, sizeof(storage8bit));
- storage8bit.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_8BIT_STORAGE_FEATURES_KHR;
- storage8bit.pNext = DE_NULL;
- bool is8bitStorageSupported = context.isDeviceFunctionalitySupported("VK_KHR_8bit_storage");
+ const VkPhysicalDeviceShaderSubgroupExtendedTypesFeatures& subgroupExtendedTypesFeatures = context.getShaderSubgroupExtendedTypesFeatures();
+ const VkPhysicalDeviceShaderFloat16Int8Features& float16Int8Features = context.getShaderFloat16Int8Features();
+ const VkPhysicalDevice16BitStorageFeatures& storage16bit = context.get16BitStorageFeatures();
+ const VkPhysicalDevice8BitStorageFeatures& storage8bit = context.get8BitStorageFeatures();
+ const VkPhysicalDeviceFeatures& features = context.getDeviceFeatures();
+ bool shaderFloat64 = features.shaderFloat64 ? true : false;
+ bool shaderInt16 = features.shaderInt16 ? true : false;
+ bool shaderInt64 = features.shaderInt64 ? true : false;
+ bool shaderSubgroupExtendedTypes = false;
+ bool shaderFloat16 = false;
+ bool shaderInt8 = false;
+ bool storageBuffer16BitAccess = false;
+ bool storageBuffer8BitAccess = false;
if (context.isDeviceFunctionalitySupported("VK_KHR_shader_subgroup_extended_types") &&
context.isDeviceFunctionalitySupported("VK_KHR_shader_float16_int8"))
{
- features2.pNext = &subgroupExtendedTypesFeatures;
- subgroupExtendedTypesFeatures.pNext = &float16Int8Features;
- if ( is16bitStorageSupported )
- {
- float16Int8Features.pNext = &storage16bit;
- if (is8bitStorageSupported)
- {
- storage16bit.pNext = &storage8bit;
- }
- }
- else
- {
- if (is8bitStorageSupported)
- {
- float16Int8Features.pNext = &storage8bit;
- }
-
- }
- }
+ shaderSubgroupExtendedTypes = subgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes ? true : false;
+ shaderFloat16 = float16Int8Features.shaderFloat16 ? true : false;
+ shaderInt8 = float16Int8Features.shaderInt8 ? true : false;
- const PlatformInterface& platformInterface = context.getPlatformInterface();
- const VkInstance instance = context.getInstance();
- const InstanceDriver instanceDriver (platformInterface, instance);
+ if ( context.isDeviceFunctionalitySupported("VK_KHR_16bit_storage") )
+ storageBuffer16BitAccess = storage16bit.storageBuffer16BitAccess ? true : false;
- instanceDriver.getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
+ if (context.isDeviceFunctionalitySupported("VK_KHR_8bit_storage"))
+ storageBuffer8BitAccess = storage8bit.storageBuffer8BitAccess ? true : false;
+ }
switch (format)
{
case VK_FORMAT_R16G16_SFLOAT:
case VK_FORMAT_R16G16B16_SFLOAT:
case VK_FORMAT_R16G16B16A16_SFLOAT:
- return subgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes & float16Int8Features.shaderFloat16 & storage16bit.storageBuffer16BitAccess ? true : false;
+ return shaderSubgroupExtendedTypes && shaderFloat16 && storageBuffer16BitAccess;
case VK_FORMAT_R64_SFLOAT:
case VK_FORMAT_R64G64_SFLOAT:
case VK_FORMAT_R64G64B64_SFLOAT:
case VK_FORMAT_R64G64B64A64_SFLOAT:
- return features2.features.shaderFloat64 ? true : false;
+ return shaderFloat64;
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R8G8_SINT:
case VK_FORMAT_R8G8B8_SINT:
case VK_FORMAT_R8G8_UINT:
case VK_FORMAT_R8G8B8_UINT:
case VK_FORMAT_R8G8B8A8_UINT:
- return subgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes & float16Int8Features.shaderInt8 & storage8bit.storageBuffer8BitAccess ? true : false;
+ return shaderSubgroupExtendedTypes && shaderInt8 && storageBuffer8BitAccess;
case VK_FORMAT_R16_SINT:
case VK_FORMAT_R16G16_SINT:
case VK_FORMAT_R16G16B16_SINT:
case VK_FORMAT_R16G16_UINT:
case VK_FORMAT_R16G16B16_UINT:
case VK_FORMAT_R16G16B16A16_UINT:
- return subgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes & features2.features.shaderInt16 & storage16bit.storageBuffer16BitAccess ? true : false;
+ return shaderSubgroupExtendedTypes && shaderInt16 && storageBuffer16BitAccess;
case VK_FORMAT_R64_SINT:
case VK_FORMAT_R64G64_SINT:
case VK_FORMAT_R64G64B64_SINT:
case VK_FORMAT_R64G64_UINT:
case VK_FORMAT_R64G64B64_UINT:
case VK_FORMAT_R64G64B64A64_UINT:
- return subgroupExtendedTypesFeatures.shaderSubgroupExtendedTypes & features2.features.shaderInt64 ? true : false;
+ return shaderSubgroupExtendedTypes && shaderInt64;
}
}
bool vkt::subgroups::isSubgroupBroadcastDynamicIdSupported (Context& context)
{
- return context.contextSupports(vk::ApiVersion(1, 2, 0)) &&
- vk::getPhysicalDeviceVulkan12Features(context.getInstanceInterface(), context.getPhysicalDevice()).subgroupBroadcastDynamicId;
+ return context.contextSupports(vk::ApiVersion(1, 2, 0)) && context.getDeviceVulkan12Features().subgroupBroadcastDynamicId;
}
std::string vkt::subgroups::getFormatNameForGLSL (VkFormat format)
{
switch (format)
{
- default:
- DE_FATAL("Unhandled format!");
- return "";
- case VK_FORMAT_R8_SINT:
- return "int8_t";
- case VK_FORMAT_R8G8_SINT:
- return "i8vec2";
- case VK_FORMAT_R8G8B8_SINT:
- return "i8vec3";
- case VK_FORMAT_R8G8B8A8_SINT:
- return "i8vec4";
- case VK_FORMAT_R8_UINT:
- return "uint8_t";
- case VK_FORMAT_R8G8_UINT:
- return "u8vec2";
- case VK_FORMAT_R8G8B8_UINT:
- return "u8vec3";
- case VK_FORMAT_R8G8B8A8_UINT:
- return "u8vec4";
- case VK_FORMAT_R16_SINT:
- return "int16_t";
- case VK_FORMAT_R16G16_SINT:
- return "i16vec2";
- case VK_FORMAT_R16G16B16_SINT:
- return "i16vec3";
- case VK_FORMAT_R16G16B16A16_SINT:
- return "i16vec4";
- case VK_FORMAT_R16_UINT:
- return "uint16_t";
- case VK_FORMAT_R16G16_UINT:
- return "u16vec2";
- case VK_FORMAT_R16G16B16_UINT:
- return "u16vec3";
- case VK_FORMAT_R16G16B16A16_UINT:
- return "u16vec4";
- case VK_FORMAT_R32_SINT:
- return "int";
- case VK_FORMAT_R32G32_SINT:
- return "ivec2";
- case VK_FORMAT_R32G32B32_SINT:
- return "ivec3";
- case VK_FORMAT_R32G32B32A32_SINT:
- return "ivec4";
- case VK_FORMAT_R32_UINT:
- return "uint";
- case VK_FORMAT_R32G32_UINT:
- return "uvec2";
- case VK_FORMAT_R32G32B32_UINT:
- return "uvec3";
- case VK_FORMAT_R32G32B32A32_UINT:
- return "uvec4";
- case VK_FORMAT_R64_SINT:
- return "int64_t";
- case VK_FORMAT_R64G64_SINT:
- return "i64vec2";
- case VK_FORMAT_R64G64B64_SINT:
- return "i64vec3";
- case VK_FORMAT_R64G64B64A64_SINT:
- return "i64vec4";
- case VK_FORMAT_R64_UINT:
- return "uint64_t";
- case VK_FORMAT_R64G64_UINT:
- return "u64vec2";
- case VK_FORMAT_R64G64B64_UINT:
- return "u64vec3";
- case VK_FORMAT_R64G64B64A64_UINT:
- return "u64vec4";
- case VK_FORMAT_R16_SFLOAT:
- return "float16_t";
- case VK_FORMAT_R16G16_SFLOAT:
- return "f16vec2";
- case VK_FORMAT_R16G16B16_SFLOAT:
- return "f16vec3";
- case VK_FORMAT_R16G16B16A16_SFLOAT:
- return "f16vec4";
- case VK_FORMAT_R32_SFLOAT:
- return "float";
- case VK_FORMAT_R32G32_SFLOAT:
- return "vec2";
- case VK_FORMAT_R32G32B32_SFLOAT:
- return "vec3";
- case VK_FORMAT_R32G32B32A32_SFLOAT:
- return "vec4";
- case VK_FORMAT_R64_SFLOAT:
- return "double";
- case VK_FORMAT_R64G64_SFLOAT:
- return "dvec2";
- case VK_FORMAT_R64G64B64_SFLOAT:
- return "dvec3";
- case VK_FORMAT_R64G64B64A64_SFLOAT:
- return "dvec4";
- case VK_FORMAT_R8_USCALED:
- return "bool";
- case VK_FORMAT_R8G8_USCALED:
- return "bvec2";
- case VK_FORMAT_R8G8B8_USCALED:
- return "bvec3";
- case VK_FORMAT_R8G8B8A8_USCALED:
- return "bvec4";
+ case VK_FORMAT_R8_SINT: return "int8_t";
+ case VK_FORMAT_R8G8_SINT: return "i8vec2";
+ case VK_FORMAT_R8G8B8_SINT: return "i8vec3";
+ case VK_FORMAT_R8G8B8A8_SINT: return "i8vec4";
+ case VK_FORMAT_R8_UINT: return "uint8_t";
+ case VK_FORMAT_R8G8_UINT: return "u8vec2";
+ case VK_FORMAT_R8G8B8_UINT: return "u8vec3";
+ case VK_FORMAT_R8G8B8A8_UINT: return "u8vec4";
+ case VK_FORMAT_R16_SINT: return "int16_t";
+ case VK_FORMAT_R16G16_SINT: return "i16vec2";
+ case VK_FORMAT_R16G16B16_SINT: return "i16vec3";
+ case VK_FORMAT_R16G16B16A16_SINT: return "i16vec4";
+ case VK_FORMAT_R16_UINT: return "uint16_t";
+ case VK_FORMAT_R16G16_UINT: return "u16vec2";
+ case VK_FORMAT_R16G16B16_UINT: return "u16vec3";
+ case VK_FORMAT_R16G16B16A16_UINT: return "u16vec4";
+ case VK_FORMAT_R32_SINT: return "int";
+ case VK_FORMAT_R32G32_SINT: return "ivec2";
+ case VK_FORMAT_R32G32B32_SINT: return "ivec3";
+ case VK_FORMAT_R32G32B32A32_SINT: return "ivec4";
+ case VK_FORMAT_R32_UINT: return "uint";
+ case VK_FORMAT_R32G32_UINT: return "uvec2";
+ case VK_FORMAT_R32G32B32_UINT: return "uvec3";
+ case VK_FORMAT_R32G32B32A32_UINT: return "uvec4";
+ case VK_FORMAT_R64_SINT: return "int64_t";
+ case VK_FORMAT_R64G64_SINT: return "i64vec2";
+ case VK_FORMAT_R64G64B64_SINT: return "i64vec3";
+ case VK_FORMAT_R64G64B64A64_SINT: return "i64vec4";
+ case VK_FORMAT_R64_UINT: return "uint64_t";
+ case VK_FORMAT_R64G64_UINT: return "u64vec2";
+ case VK_FORMAT_R64G64B64_UINT: return "u64vec3";
+ case VK_FORMAT_R64G64B64A64_UINT: return "u64vec4";
+ case VK_FORMAT_R16_SFLOAT: return "float16_t";
+ case VK_FORMAT_R16G16_SFLOAT: return "f16vec2";
+ case VK_FORMAT_R16G16B16_SFLOAT: return "f16vec3";
+ case VK_FORMAT_R16G16B16A16_SFLOAT: return "f16vec4";
+ case VK_FORMAT_R32_SFLOAT: return "float";
+ case VK_FORMAT_R32G32_SFLOAT: return "vec2";
+ case VK_FORMAT_R32G32B32_SFLOAT: return "vec3";
+ case VK_FORMAT_R32G32B32A32_SFLOAT: return "vec4";
+ case VK_FORMAT_R64_SFLOAT: return "double";
+ case VK_FORMAT_R64G64_SFLOAT: return "dvec2";
+ case VK_FORMAT_R64G64B64_SFLOAT: return "dvec3";
+ case VK_FORMAT_R64G64B64A64_SFLOAT: return "dvec4";
+ case VK_FORMAT_R8_USCALED: return "bool";
+ case VK_FORMAT_R8G8_USCALED: return "bvec2";
+ case VK_FORMAT_R8G8B8_USCALED: return "bvec3";
+ case VK_FORMAT_R8G8B8A8_USCALED: return "bvec4";
+ default: TCU_THROW(InternalError, "Unhandled format");
}
}
}
}
-const std::vector<vk::VkFormat> vkt::subgroups::getAllFormats()
+const std::vector<vk::VkFormat> vkt::subgroups::getAllFormats ()
{
std::vector<VkFormat> formats;
}
}
-bool vkt::subgroups::isFormat8bitTy(VkFormat format)
+bool vkt::subgroups::isFormat8bitTy (VkFormat format)
{
switch (format)
{
}
}
-bool vkt::subgroups::isFormat16BitTy(VkFormat format)
+bool vkt::subgroups::isFormat16BitTy (VkFormat format)
{
switch (format)
{
collection.add("geometry_points") << geometryTemplate.specialize(pointsParams) << options;
}
-void initializeMemory(Context& context, const Allocation& alloc, subgroups::SSBOData& data)
+void initializeMemory (Context& context, const Allocation& alloc, const subgroups::SSBOData& data)
{
const vk::VkFormat format = data.format;
const vk::VkDeviceSize size = data.numElements *
return -1;
}
-tcu::TestStatus vkt::subgroups::makeTessellationEvaluationFrameBufferTest(
- Context& context, VkFormat format, SSBOData* extraData,
- deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize),
- const VkShaderStageFlags shaderStage)
+tcu::TestStatus vkt::subgroups::makeTessellationEvaluationFrameBufferTest (Context& context,
+ VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ subgroups::CheckResult checkResult,
+ const VkShaderStageFlags shaderStage)
{
return makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(context, format, extraData, extraDataCount, internalData, checkResult, shaderStage, 0u, 0u);
}
-tcu::TestStatus vkt::subgroups::makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(
- Context& context, VkFormat format, SSBOData* extraData,
- deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize),
- const VkShaderStageFlags shaderStage, const deUint32 tessShaderStageCreateFlags, const deUint32 requiredSubgroupSize)
+tcu::TestStatus vkt::subgroups::makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize (Context& context,
+ VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ subgroups::CheckResult checkResult,
+ const VkShaderStageFlags shaderStage,
+ const deUint32 tessShaderStageCreateFlags,
+ const deUint32 requiredSubgroupSize)
{
const DeviceInterface& vk = context.getDeviceInterface();
const VkDevice device = context.getDevice();
DescriptorSetUpdateBuilder updateBuilder;
Move <VkDescriptorPool> descriptorPool;
Move <VkDescriptorSet> descriptorSet;
-
- const Unique<VkShaderModule> vertexShaderModule (createShaderModule(vk, device,
- context.getBinaryCollection().get("vert"), 0u));
- const Unique<VkShaderModule> teCtrlShaderModule (createShaderModule(vk, device,
- context.getBinaryCollection().get("tesc"), 0u));
- const Unique<VkShaderModule> teEvalShaderModule (createShaderModule(vk, device,
- context.getBinaryCollection().get("tese"), 0u));
- const Unique<VkShaderModule> fragmentShaderModule (createShaderModule(vk, device,
- context.getBinaryCollection().get("fragment"), 0u));
+ const Unique<VkShaderModule> vertexShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("vert"), 0u));
+ const Unique<VkShaderModule> teCtrlShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("tesc"), 0u));
+ const Unique<VkShaderModule> teEvalShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("tese"), 0u));
+ const Unique<VkShaderModule> fragmentShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("fragment"), 0u));
const Unique<VkRenderPass> renderPass (makeRenderPass(context, format));
-
const VkVertexInputBindingDescription vertexInputBinding =
{
- 0u, // binding;
- static_cast<deUint32>(sizeof(tcu::Vec4)), // stride;
- VK_VERTEX_INPUT_RATE_VERTEX // inputRate
+ 0u, // deUint32 binding;
+ static_cast<deUint32>(sizeof(tcu::Vec4)), // deUint32 stride;
+ VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate;
};
-
const VkVertexInputAttributeDescription vertexInputAttribute =
{
- 0u,
- 0u,
- VK_FORMAT_R32G32B32A32_SFLOAT,
- 0u
+ 0u, // deUint32 location;
+ 0u, // deUint32 binding;
+ VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
+ 0u // deUint32 offset;
};
for (deUint32 i = 0u; i < extraDataCount; i++)
return tcu::TestStatus::pass("OK");
}
-bool vkt::subgroups::check(std::vector<const void*> datas,
- deUint32 width, deUint32 ref)
+bool vkt::subgroups::check (std::vector<const void*> datas, deUint32 width, deUint32 ref)
{
const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
return true;
}
-bool vkt::subgroups::checkCompute(std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32 ref)
+bool vkt::subgroups::checkCompute (std::vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32 ref)
{
const deUint32 globalSizeX = numWorkgroups[0] * localSize[0];
const deUint32 globalSizeY = numWorkgroups[1] * localSize[1];
return check(datas, globalSizeX * globalSizeY * globalSizeZ, ref);
}
-tcu::TestStatus vkt::subgroups::makeGeometryFrameBufferTest(
- Context& context, VkFormat format, SSBOData* extraData,
- deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize))
+tcu::TestStatus vkt::subgroups::makeGeometryFrameBufferTest (Context& context,
+ VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ subgroups::CheckResult checkResult)
{
- return makeGeometryFrameBufferTestRequiredSubgroupSize(context, format, extraData, extraDataCount, internalData, checkResult,
- 0u, 0u);
+ return makeGeometryFrameBufferTestRequiredSubgroupSize(context, format, extraData, extraDataCount, internalData, checkResult, 0u, 0u);
}
-tcu::TestStatus vkt::subgroups::makeGeometryFrameBufferTestRequiredSubgroupSize(
- Context& context, VkFormat format, SSBOData* extraData,
- deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize),
- const deUint32 geometryShaderStageCreateFlags, const deUint32 requiredSubgroupSize)
+tcu::TestStatus vkt::subgroups::makeGeometryFrameBufferTestRequiredSubgroupSize (Context& context,
+ VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ subgroups::CheckResult checkResult,
+ const deUint32 geometryShaderStageCreateFlags,
+ const deUint32 requiredSubgroupSize)
{
const DeviceInterface& vk = context.getDeviceInterface();
const VkDevice device = context.getDevice();
DescriptorSetUpdateBuilder updateBuilder;
Move <VkDescriptorPool> descriptorPool;
Move <VkDescriptorSet> descriptorSet;
-
const Unique<VkShaderModule> vertexShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("vert"), 0u));
const Unique<VkShaderModule> geometryShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("geometry"), 0u));
const Unique<VkShaderModule> fragmentShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("fragment"), 0u));
const Unique<VkRenderPass> renderPass (makeRenderPass(context, format));
const VkVertexInputBindingDescription vertexInputBinding =
{
- 0u, // binding;
- static_cast<deUint32>(sizeof(tcu::Vec4)), // stride;
- VK_VERTEX_INPUT_RATE_VERTEX // inputRate
+ 0u, // deUint32 binding;
+ static_cast<deUint32>(sizeof(tcu::Vec4)), // deUint32 stride;
+ VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate;
};
-
const VkVertexInputAttributeDescription vertexInputAttribute =
{
- 0u,
- 0u,
- VK_FORMAT_R32G32B32A32_SFLOAT,
- 0u
+ 0u, // deUint32 location;
+ 0u, // deUint32 binding;
+ VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
+ 0u // deUint32 offset;
};
for (deUint32 i = 0u; i < extraDataCount; i++)
return tcu::TestStatus::pass("OK");
}
-tcu::TestStatus vkt::subgroups::allStages(
- Context& context, VkFormat format, SSBOData* extraData,
- deUint32 extraDataCount, const void* internalData,
- const VerificationFunctor& checkResult,
- const vk::VkShaderStageFlags shaderStage)
+vk::VkShaderStageFlags vkt::subgroups::getPossibleGraphicsSubgroupStages (Context& context, const vk::VkShaderStageFlags testedStages)
+{
+ const VkPhysicalDeviceSubgroupProperties& subgroupProperties = context.getSubgroupProperties();
+ VkShaderStageFlags stages = testedStages & subgroupProperties.supportedStages;
+
+ DE_ASSERT(isAllGraphicsStages(testedStages));
+
+ if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ {
+ if ((stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
+ TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
+ else
+ stages = VK_SHADER_STAGE_FRAGMENT_BIT;
+ }
+
+ if (static_cast<VkShaderStageFlags>(0u) == stages)
+ TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
+
+ return stages;
+}
+
+tcu::TestStatus vkt::subgroups::allStages (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ const VerificationFunctor& checkResult,
+ const vk::VkShaderStageFlags shaderStage)
{
return vkt::subgroups::allStagesRequiredSubgroupSize(context, format, extraData, extraDataCount, internalData, checkResult, shaderStage,
0u, 0u, 0u, 0u, 0u, DE_NULL);
}
-tcu::TestStatus vkt::subgroups::allStagesRequiredSubgroupSize(
- Context& context, VkFormat format, SSBOData* extraDatas,
- deUint32 extraDatasCount, const void* internalData,
- const VerificationFunctor& checkResult,
- const VkShaderStageFlags shaderStageTested,
- const deUint32 vertexShaderStageCreateFlags,
- const deUint32 tessellationControlShaderStageCreateFlags,
- const deUint32 tessellationEvalShaderStageCreateFlags,
- const deUint32 geometryShaderStageCreateFlags,
- const deUint32 fragmentShaderStageCreateFlags,
- const deUint32 requiredSubgroupSize[5])
+tcu::TestStatus vkt::subgroups::allStagesRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const void* internalData,
+ const VerificationFunctor& checkResult,
+ const vk::VkShaderStageFlags shaderStageTested,
+ const deUint32 vertexShaderStageCreateFlags,
+ const deUint32 tessellationControlShaderStageCreateFlags,
+ const deUint32 tessellationEvalShaderStageCreateFlags,
+ const deUint32 geometryShaderStageCreateFlags,
+ const deUint32 fragmentShaderStageCreateFlags,
+ const deUint32 requiredSubgroupSize[5])
{
const DeviceInterface& vk = context.getDeviceInterface();
const VkDevice device = context.getDevice();
return tcu::TestStatus::pass("OK");
}
-tcu::TestStatus vkt::subgroups::makeVertexFrameBufferTest(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize))
+tcu::TestStatus vkt::subgroups::makeVertexFrameBufferTest (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ subgroups::CheckResult checkResult)
{
- return makeVertexFrameBufferTestRequiredSubgroupSize(context, format, extraData, extraDataCount, internalData, checkResult,
- 0u, 0u);
+ return makeVertexFrameBufferTestRequiredSubgroupSize(context, format, extraData, extraDataCount, internalData, checkResult, 0u, 0u);
}
-tcu::TestStatus vkt::subgroups::makeVertexFrameBufferTestRequiredSubgroupSize(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize),
- const deUint32 vertexShaderStageCreateFlags, const deUint32 requiredSubgroupSize)
+tcu::TestStatus vkt::subgroups::makeVertexFrameBufferTestRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ subgroups::CheckResult checkResult,
+ const deUint32 vertexShaderStageCreateFlags,
+ const deUint32 requiredSubgroupSize)
{
const DeviceInterface& vk = context.getDeviceInterface();
const VkDevice device = context.getDevice();
const Unique<VkShaderModule> vertexShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("vert"), 0u));
const Unique<VkShaderModule> fragmentShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("fragment"), 0u));
const Unique<VkRenderPass> renderPass (makeRenderPass(context, format));
-
const VkVertexInputBindingDescription vertexInputBinding =
{
0u, // binding;
static_cast<deUint32>(sizeof(tcu::Vec4)), // stride;
VK_VERTEX_INPUT_RATE_VERTEX // inputRate
};
-
const VkVertexInputAttributeDescription vertexInputAttribute =
{
0u,
return tcu::TestStatus::pass("OK");
}
-tcu::TestStatus vkt::subgroups::makeFragmentFrameBufferTest(
- Context& context, VkFormat format, SSBOData* extraDatas,
- deUint32 extraDatasCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width,
- deUint32 height, deUint32 subgroupSize))
+tcu::TestStatus vkt::subgroups::makeFragmentFrameBufferTest (Context& context,
+ VkFormat format,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const void* internalData,
+ CheckResultFragment checkResult)
{
- return makeFragmentFrameBufferTestRequiredSubgroupSize(context, format, extraDatas, extraDatasCount, internalData, checkResult,
- 0u, 0u);
+ return makeFragmentFrameBufferTestRequiredSubgroupSize(context, format, extraDatas, extraDatasCount, internalData, checkResult, 0u, 0u);
}
-tcu::TestStatus vkt::subgroups::makeFragmentFrameBufferTestRequiredSubgroupSize(
- Context& context, VkFormat format, SSBOData* extraDatas,
- deUint32 extraDatasCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width,
- deUint32 height, deUint32 subgroupSize),
- const deUint32 fragmentShaderStageCreateFlags, const deUint32 requiredSubgroupSize)
+tcu::TestStatus vkt::subgroups::makeFragmentFrameBufferTestRequiredSubgroupSize (Context& context,
+ VkFormat format,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const void* internalData,
+ CheckResultFragment checkResult,
+ const deUint32 fragmentShaderStageCreateFlags,
+ const deUint32 requiredSubgroupSize)
{
- const DeviceInterface& vk = context.getDeviceInterface();
- const VkDevice device = context.getDevice();
- const VkQueue queue = context.getUniversalQueue();
- const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
- const Unique<VkShaderModule> vertexShaderModule (createShaderModule
- (vk, device, context.getBinaryCollection().get("vert"), 0u));
- const Unique<VkShaderModule> fragmentShaderModule (createShaderModule
- (vk, device, context.getBinaryCollection().get("fragment"), 0u));
-
- std::vector< de::SharedPtr<BufferOrImage> > inputBuffers(extraDatasCount);
+ const DeviceInterface& vk = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ const VkQueue queue = context.getUniversalQueue();
+ const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
+ const Unique<VkShaderModule> vertexShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("vert"), 0u));
+ const Unique<VkShaderModule> fragmentShaderModule (createShaderModule(vk, device, context.getBinaryCollection().get("fragment"), 0u));
+ std::vector< de::SharedPtr<BufferOrImage> > inputBuffers (extraDatasCount);
for (deUint32 i = 0; i < extraDatasCount; i++)
{
if (extraDatas[i].isImage)
{
- inputBuffers[i] = de::SharedPtr<BufferOrImage>(new Image(context,
- static_cast<deUint32>(extraDatas[i].numElements), 1, extraDatas[i].format));
+ inputBuffers[i] = de::SharedPtr<BufferOrImage>(new Image(context, static_cast<deUint32>(extraDatas[i].numElements), 1, extraDatas[i].format));
}
else
{
- vk::VkDeviceSize size =
- getElementSizeInBytes(extraDatas[i].format, extraDatas[i].layout) * extraDatas[i].numElements;
+ const vk::VkDeviceSize size = getElementSizeInBytes(extraDatas[i].format, extraDatas[i].layout) * extraDatas[i].numElements;
+
inputBuffers[i] = de::SharedPtr<BufferOrImage>(new Buffer(context, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT));
}
const Allocation& alloc = inputBuffers[i]->getAllocation();
+
initializeMemory(context, alloc, extraDatas[i]);
}
for (deUint32 i = 0; i < extraDatasCount; i++)
{
- layoutBuilder.addBinding(inputBuffers[i]->getType(), 1,
- VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL);
+ layoutBuilder.addBinding(inputBuffers[i]->getType(), 1, VK_SHADER_STAGE_FRAGMENT_BIT, DE_NULL);
}
- const Unique<VkDescriptorSetLayout> descriptorSetLayout(
- layoutBuilder.build(vk, device));
-
- const Unique<VkPipelineLayout> pipelineLayout(
- makePipelineLayout(vk, device, *descriptorSetLayout));
-
- const Unique<VkRenderPass> renderPass(makeRenderPass(context, format));
-
- const deUint32 requiredSubgroupSizes[5] = {0u, 0u, 0u, 0u, requiredSubgroupSize};
- const Unique<VkPipeline> pipeline(makeGraphicsPipeline(context, *pipelineLayout,
- VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
- *vertexShaderModule, *fragmentShaderModule, DE_NULL, DE_NULL, DE_NULL, *renderPass, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
- DE_NULL, DE_NULL, true, VK_FORMAT_R32G32B32A32_SFLOAT,
- 0u, 0u, 0u, 0u, fragmentShaderStageCreateFlags, requiredSubgroupSize != 0u ? requiredSubgroupSizes : DE_NULL));
-
- DescriptorPoolBuilder poolBuilder;
+ const Unique<VkDescriptorSetLayout> descriptorSetLayout(layoutBuilder.build(vk, device));
+ const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, *descriptorSetLayout));
+ const Unique<VkRenderPass> renderPass(makeRenderPass(context, format));
+ const deUint32 requiredSubgroupSizes[5] = {0u, 0u, 0u, 0u, requiredSubgroupSize};
+ const Unique<VkPipeline> pipeline(makeGraphicsPipeline(context,
+ *pipelineLayout,
+ VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT,
+ *vertexShaderModule,
+ *fragmentShaderModule,
+ DE_NULL,
+ DE_NULL,
+ DE_NULL,
+ *renderPass,
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ DE_NULL,
+ DE_NULL,
+ true,
+ VK_FORMAT_R32G32B32A32_SFLOAT,
+ 0u,
+ 0u,
+ 0u,
+ 0u,
+ fragmentShaderStageCreateFlags,
+ requiredSubgroupSize != 0u ? requiredSubgroupSizes : DE_NULL));
+ DescriptorPoolBuilder poolBuilder;
// To stop validation complaining, always add at least one type to pool.
poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
if (extraDatasCount > 0)
{
- descriptorPool = poolBuilder.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+ descriptorPool = poolBuilder.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout);
}
{
if (inputBuffers[i]->isImage())
{
- VkDescriptorImageInfo info =
- makeDescriptorImageInfo(inputBuffers[i]->getAsImage()->getSampler(),
- inputBuffers[i]->getAsImage()->getImageView(), VK_IMAGE_LAYOUT_GENERAL);
+ const VkDescriptorImageInfo info = makeDescriptorImageInfo(inputBuffers[i]->getAsImage()->getSampler(), inputBuffers[i]->getAsImage()->getImageView(), VK_IMAGE_LAYOUT_GENERAL);
- updateBuilder.writeSingle(*descriptorSet,
- DescriptorSetUpdateBuilder::Location::binding(i),
- inputBuffers[i]->getType(), &info);
+ updateBuilder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(i), inputBuffers[i]->getType(), &info);
}
else
{
- VkDescriptorBufferInfo info =
- makeDescriptorBufferInfo(inputBuffers[i]->getAsBuffer()->getBuffer(),
- 0ull, inputBuffers[i]->getAsBuffer()->getSize());
+ const VkDescriptorBufferInfo info = makeDescriptorBufferInfo(inputBuffers[i]->getAsBuffer()->getBuffer(), 0ull, inputBuffers[i]->getAsBuffer()->getSize());
- updateBuilder.writeSingle(*descriptorSet,
- DescriptorSetUpdateBuilder::Location::binding(i),
- inputBuffers[i]->getType(), &info);
+ updateBuilder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(i), inputBuffers[i]->getType(), &info);
}
}
updateBuilder.update(vk, device);
const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, device, queueFamilyIndex));
-
const deUint32 subgroupSize = getSubgroupSize(context);
-
const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(context, *cmdPool));
-
- unsigned totalIterations = 0;
- unsigned failedIterations = 0;
+ unsigned totalIterations = 0;
+ unsigned failedIterations = 0;
for (deUint32 width = 8; width <= subgroupSize; width *= 2)
{
for (deUint32 i = 0; i < extraDatasCount; i++)
{
const Allocation& alloc = inputBuffers[i]->getAllocation();
+
initializeMemory(context, alloc, extraDatas[i]);
}
- VkDeviceSize formatSize = getFormatSizeInBytes(format);
- const VkDeviceSize resultImageSizeInBytes =
- width * height * formatSize;
-
- Image resultImage(context, width, height, format,
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
- VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
-
- Buffer resultBuffer(context, resultImageSizeInBytes,
- VK_IMAGE_USAGE_TRANSFER_DST_BIT);
-
- const Unique<VkFramebuffer> framebuffer(makeFramebuffer(vk, device, *renderPass, resultImage.getImageView(), width, height));
+ const VkDeviceSize formatSize = getFormatSizeInBytes(format);
+ const VkDeviceSize resultImageSizeInBytes = width * height * formatSize;
+ Image resultImage (context, width, height, format, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
+ Buffer resultBuffer (context, resultImageSizeInBytes, VK_IMAGE_USAGE_TRANSFER_DST_BIT);
+ const Unique<VkFramebuffer> framebuffer (makeFramebuffer(vk, device, *renderPass, resultImage.getImageView(), width, height));
+ VkViewport viewport = makeViewport(width, height);
+ VkRect2D scissor = {{0, 0}, {width, height}};
beginCommandBuffer(vk, *cmdBuffer);
- VkViewport viewport = makeViewport(width, height);
-
- vk.cmdSetViewport(
- *cmdBuffer, 0, 1, &viewport);
-
- VkRect2D scissor = {{0, 0}, {width, height}};
+ vk.cmdSetViewport(*cmdBuffer, 0, 1, &viewport);
- vk.cmdSetScissor(
- *cmdBuffer, 0, 1, &scissor);
+ vk.cmdSetScissor(*cmdBuffer, 0, 1, &scissor);
beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer, makeRect2D(0, 0, width, height), tcu::Vec4(0.0f));
- vk.cmdBindPipeline(
- *cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
+ vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
if (extraDatasCount > 0)
- {
- vk.cmdBindDescriptorSets(*cmdBuffer,
- VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u,
- &descriptorSet.get(), 0u, DE_NULL);
- }
+ vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
vk.cmdDraw(*cmdBuffer, 4, 1, 0, 0);
return tcu::TestStatus::pass("OK");
}
-Move<VkPipeline> makeComputePipeline(Context& context,
- const VkPipelineLayout pipelineLayout, const VkShaderModule shaderModule,
- const deUint32 pipelineShaderStageFlags, const deUint32 pipelineCreateFlags, VkPipeline basePipelineHandle,
- deUint32 localSizeX, deUint32 localSizeY, deUint32 localSizeZ, deUint32 requiredSubgroupSize)
+Move<VkPipeline> makeComputePipeline (Context& context,
+ const VkPipelineLayout pipelineLayout,
+ const VkShaderModule shaderModule,
+ const deUint32 pipelineShaderStageFlags,
+ const deUint32 pipelineCreateFlags,
+ VkPipeline basePipelineHandle,
+ deUint32 localSizeX,
+ deUint32 localSizeY,
+ deUint32 localSizeZ,
+ deUint32 requiredSubgroupSize)
{
- const deUint32 localSize[3] = {localSizeX, localSizeY, localSizeZ};
-
- const vk::VkSpecializationMapEntry entries[3] =
+ const deUint32 localSize[3] = {localSizeX, localSizeY, localSizeZ};
+ const vk::VkSpecializationMapEntry entries[3] =
{
{0, sizeof(deUint32) * 0, sizeof(deUint32)},
{1, sizeof(deUint32) * 1, sizeof(deUint32)},
{2, static_cast<deUint32>(sizeof(deUint32) * 2), sizeof(deUint32)},
};
-
- const vk::VkSpecializationInfo info =
+ const vk::VkSpecializationInfo info =
{
/* mapEntryCount = */ 3,
/* pMapEntries = */ entries,
/* dataSize = */ sizeof(localSize),
/* pData = */ localSize
};
-
- const vk::VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeCreateInfo =
+ const vk::VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT subgroupSizeCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT, // VkStructureType sType;
DE_NULL, // void* pNext;
requiredSubgroupSize // uint32_t requiredSubgroupSize;
};
-
- const vk::VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
+ const vk::VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
{
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
(requiredSubgroupSize != 0u ? &subgroupSizeCreateInfo : DE_NULL), // const void* pNext;
"main", // const char* pName;
&info, // const VkSpecializationInfo* pSpecializationInfo;
};
-
- const vk::VkComputePipelineCreateInfo pipelineCreateInfo =
+ const vk::VkComputePipelineCreateInfo pipelineCreateInfo =
{
VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
-1, // deInt32 basePipelineIndex;
};
- return createComputePipeline(context.getDeviceInterface(),
- context.getDevice(), DE_NULL, &pipelineCreateInfo);
+ return createComputePipeline(context.getDeviceInterface(), context.getDevice(), DE_NULL, &pipelineCreateInfo);
}
-tcu::TestStatus vkt::subgroups::makeComputeTestRequiredSubgroupSize(
- Context& context, VkFormat format, SSBOData* inputs, deUint32 inputsCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32 subgroupSize),
- const deUint32 pipelineShaderStageCreateFlags, const deUint32 numWorkgroups[3],
- const deBool isRequiredSubgroupSize, const deUint32 subgroupSize, const deUint32 localSizesToTest[][3], const deUint32 localSizesToTestCount)
+tcu::TestStatus vkt::subgroups::makeComputeTestRequiredSubgroupSize (Context& context,
+ VkFormat format,
+ const SSBOData* inputs,
+ deUint32 inputsCount,
+ const void* internalData,
+ CheckResultCompute checkResult,
+ const deUint32 pipelineShaderStageCreateFlags,
+ const deUint32 numWorkgroups[3],
+ const deBool isRequiredSubgroupSize,
+ const deUint32 subgroupSize,
+ const deUint32 localSizesToTest[][3],
+ const deUint32 localSizesToTestCount)
{
- const DeviceInterface& vk = context.getDeviceInterface();
- const VkDevice device = context.getDevice();
- const VkQueue queue = context.getUniversalQueue();
- const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
- VkDeviceSize elementSize = getFormatSizeInBytes(format);
-
- VkDeviceSize maxSubgroupSize = maxSupportedSubgroupSize();
-
- if (isRequiredSubgroupSize)
- {
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties2;
- properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties2.pNext = &subgroupSizeControlProperties;
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties2);
- maxSubgroupSize = deMax32(subgroupSizeControlProperties.maxSubgroupSize, static_cast<deUint32>(maxSubgroupSize));
- }
-
- const VkDeviceSize resultBufferSize = maxSubgroupSize *
- maxSubgroupSize *
- maxSubgroupSize;
-
- const VkDeviceSize resultBufferSizeInBytes = resultBufferSize * elementSize;
-
- Buffer resultBuffer(
- context, resultBufferSizeInBytes);
-
- std::vector< de::SharedPtr<BufferOrImage> > inputBuffers(inputsCount);
+ const DeviceInterface& vk = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ const VkQueue queue = context.getUniversalQueue();
+ const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ const VkDeviceSize elementSize = getFormatSizeInBytes(format);
+ const VkDeviceSize maxSubgroupSize = isRequiredSubgroupSize
+ ? deMax32(subgroupSizeControlProperties.maxSubgroupSize, maxSupportedSubgroupSize())
+ : maxSupportedSubgroupSize();
+ const VkDeviceSize resultBufferSize = maxSubgroupSize * maxSubgroupSize * maxSubgroupSize;
+ const VkDeviceSize resultBufferSizeInBytes = resultBufferSize * elementSize;
+ Buffer resultBuffer (context, resultBufferSizeInBytes);
+ std::vector< de::SharedPtr<BufferOrImage> > inputBuffers (inputsCount);
for (deUint32 i = 0; i < inputsCount; i++)
{
if (inputs[i].isImage)
{
- inputBuffers[i] = de::SharedPtr<BufferOrImage>(new Image(context,
- static_cast<deUint32>(inputs[i].numElements), 1, inputs[i].format));
+ inputBuffers[i] = de::SharedPtr<BufferOrImage>(new Image(context, static_cast<deUint32>(inputs[i].numElements), 1, inputs[i].format));
}
else
{
- vk::VkDeviceSize size =
- getElementSizeInBytes(inputs[i].format, inputs[i].layout) * inputs[i].numElements;
+ const vk::VkDeviceSize size = getElementSizeInBytes(inputs[i].format, inputs[i].layout) * inputs[i].numElements;
+
inputBuffers[i] = de::SharedPtr<BufferOrImage>(new Buffer(context, size));
}
const Allocation& alloc = inputBuffers[i]->getAllocation();
+
initializeMemory(context, alloc, inputs[i]);
}
poolBuilder.addType(inputBuffers[i]->getType());
}
- const Unique<VkDescriptorPool> descriptorPool(
- poolBuilder.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
-
- // Create descriptor set
- const Unique<VkDescriptorSet> descriptorSet(
- makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
-
- DescriptorSetUpdateBuilder updateBuilder;
-
- const VkDescriptorBufferInfo resultDescriptorInfo =
- makeDescriptorBufferInfo(
- resultBuffer.getBuffer(), 0ull, resultBufferSizeInBytes);
+ const Unique<VkDescriptorPool> descriptorPool (poolBuilder.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+ const Unique<VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
+ const VkDescriptorBufferInfo resultDescriptorInfo = makeDescriptorBufferInfo(resultBuffer.getBuffer(), 0ull, resultBufferSizeInBytes);
+ DescriptorSetUpdateBuilder updateBuilder;
- updateBuilder.writeSingle(*descriptorSet,
- DescriptorSetUpdateBuilder::Location::binding(0u),
- VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultDescriptorInfo);
+ updateBuilder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultDescriptorInfo);
for (deUint32 i = 0; i < inputsCount; i++)
{
if (inputBuffers[i]->isImage())
{
- VkDescriptorImageInfo info =
- makeDescriptorImageInfo(inputBuffers[i]->getAsImage()->getSampler(),
- inputBuffers[i]->getAsImage()->getImageView(), VK_IMAGE_LAYOUT_GENERAL);
+ const VkDescriptorImageInfo info = makeDescriptorImageInfo(inputBuffers[i]->getAsImage()->getSampler(), inputBuffers[i]->getAsImage()->getImageView(), VK_IMAGE_LAYOUT_GENERAL);
- updateBuilder.writeSingle(*descriptorSet,
- DescriptorSetUpdateBuilder::Location::binding(i + 1),
- inputBuffers[i]->getType(), &info);
+ updateBuilder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(i + 1), inputBuffers[i]->getType(), &info);
}
else
{
- vk::VkDeviceSize size =
- getElementSizeInBytes(inputs[i].format, inputs[i].layout) * inputs[i].numElements;
- VkDescriptorBufferInfo info =
- makeDescriptorBufferInfo(inputBuffers[i]->getAsBuffer()->getBuffer(), 0ull, size);
+ vk::VkDeviceSize size = getElementSizeInBytes(inputs[i].format, inputs[i].layout) * inputs[i].numElements;
+ VkDescriptorBufferInfo info = makeDescriptorBufferInfo(inputBuffers[i]->getAsBuffer()->getBuffer(), 0ull, size);
- updateBuilder.writeSingle(*descriptorSet,
- DescriptorSetUpdateBuilder::Location::binding(i + 1),
- inputBuffers[i]->getType(), &info);
+ updateBuilder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(i + 1), inputBuffers[i]->getType(), &info);
}
}
updateBuilder.update(vk, device);
- const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, device, queueFamilyIndex));
-
- unsigned totalIterations = 0;
- unsigned failedIterations = 0;
-
- const Unique<VkCommandBuffer> cmdBuffer(
- makeCommandBuffer(context, *cmdPool));
-
- std::vector<de::SharedPtr<Move<VkPipeline>>> pipelines(localSizesToTestCount);
+ const Unique<VkCommandPool> cmdPool (makeCommandPool(vk, device, queueFamilyIndex));
+ unsigned totalIterations = 0;
+ unsigned failedIterations = 0;
+ const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer(context, *cmdPool));
+ std::vector<de::SharedPtr<Move<VkPipeline>>> pipelines (localSizesToTestCount);
context.getTestContext().touchWatchdog();
- pipelines[0] =
- de::SharedPtr<Move<VkPipeline>>(new Move<VkPipeline>(
- makeComputePipeline(context, *pipelineLayout, *shaderModule,
- pipelineShaderStageCreateFlags, VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT, (VkPipeline) DE_NULL,
- localSizesToTest[0][0], localSizesToTest[0][1], localSizesToTest[0][2],
- isRequiredSubgroupSize ? subgroupSize : 0u)));
+ {
+ pipelines[0] = de::SharedPtr<Move<VkPipeline>>(new Move<VkPipeline>(makeComputePipeline(context,
+ *pipelineLayout,
+ *shaderModule,
+ pipelineShaderStageCreateFlags,
+ VK_PIPELINE_CREATE_ALLOW_DERIVATIVES_BIT,
+ (VkPipeline) DE_NULL,
+ localSizesToTest[0][0],
+ localSizesToTest[0][1],
+ localSizesToTest[0][2],
+ isRequiredSubgroupSize ? subgroupSize : 0u)));
+ }
context.getTestContext().touchWatchdog();
for (deUint32 index = 1; index < (localSizesToTestCount - 1); index++)
const deUint32 nextZ = localSizesToTest[index][2];
context.getTestContext().touchWatchdog();
- pipelines[index] =
- de::SharedPtr<Move<VkPipeline>>(new Move<VkPipeline>(
- makeComputePipeline(context, *pipelineLayout, *shaderModule,
- pipelineShaderStageCreateFlags, VK_PIPELINE_CREATE_DERIVATIVE_BIT, **pipelines[0],
- nextX, nextY, nextZ,
- isRequiredSubgroupSize ? subgroupSize : 0u)));
+ {
+ pipelines[index] = de::SharedPtr<Move<VkPipeline>>(new Move<VkPipeline>(makeComputePipeline(context,
+ *pipelineLayout,
+ *shaderModule,
+ pipelineShaderStageCreateFlags,
+ VK_PIPELINE_CREATE_DERIVATIVE_BIT,
+ **pipelines[0],
+ nextX,
+ nextY,
+ nextZ,
+ isRequiredSubgroupSize ? subgroupSize : 0u)));
+ }
context.getTestContext().touchWatchdog();
}
for (deUint32 index = 0; index < (localSizesToTestCount - 1); index++)
{
-
// we are running one test
totalIterations++;
beginCommandBuffer(vk, *cmdBuffer);
+ {
+ vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, **pipelines[index]);
- vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, **pipelines[index]);
-
- vk.cmdBindDescriptorSets(*cmdBuffer,
- VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u,
- &descriptorSet.get(), 0u, DE_NULL);
-
- vk.cmdDispatch(*cmdBuffer,numWorkgroups[0], numWorkgroups[1], numWorkgroups[2]);
+ vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
+ vk.cmdDispatch(*cmdBuffer, numWorkgroups[0], numWorkgroups[1], numWorkgroups[2]);
+ }
endCommandBuffer(vk, *cmdBuffer);
submitCommandsAndWait(vk, device, queue, *cmdBuffer);
return tcu::TestStatus::pass("OK");
}
-tcu::TestStatus vkt::subgroups::makeComputeTest(
- Context& context, VkFormat format, SSBOData* inputs, deUint32 inputsCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32 subgroupSize),
- deUint32 requiredSubgroupSize, const deUint32 pipelineShaderStageCreateFlags)
+tcu::TestStatus vkt::subgroups::makeComputeTest (Context& context,
+ VkFormat format,
+ const SSBOData* inputs,
+ deUint32 inputsCount,
+ const void* internalData,
+ CheckResultCompute checkResult,
+ deUint32 requiredSubgroupSize,
+ const deUint32 pipelineShaderStageCreateFlags)
{
const deUint32 numWorkgroups[3] = {4, 2, 2};
deUint32 subgroupSize = requiredSubgroupSize;
numWorkgroups, requiredSubgroupSize != 0u, subgroupSize, localSizesToTest, localSizesToTestCount);
}
-void vkt::subgroups::supportedCheckShader(Context& context, const vk::VkShaderStageFlags shaderStage)
+static inline void checkShaderStageSetValidity (const VkShaderStageFlags shaderStages)
{
- if ((VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) & shaderStage &&
+ if (shaderStages == 0)
+ TCU_THROW(InternalError, "Shader stage is not specified");
+
+ // It can actually be only 1 or 0.
+ const deUint32 exclusivePipelinesCount = (isAllComputeStages(shaderStages) ? 1 : 0)
+ + (isAllGraphicsStages(shaderStages) ? 1 : 0)
+ + (isAllRayTracingStages(shaderStages) ? 1 : 0);
+
+ if (exclusivePipelinesCount != 1)
+ TCU_THROW(InternalError, "Mix of shaders from different pipelines is detected");
+}
+
+void vkt::subgroups::supportedCheckShader (Context& context, const VkShaderStageFlags shaderStages)
+{
+ checkShaderStageSetValidity(shaderStages);
+
+ if ((context.getSubgroupProperties().supportedStages & shaderStages) == 0)
+ {
+ if (isAllComputeStages(shaderStages))
+ TCU_FAIL("Compute shader is required to support subgroup operations");
+ else
+ TCU_THROW(NotSupportedError, "Subgroup support is not available for test shader stage(s)");
+ }
+
+ if ((VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT | VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT) & shaderStages &&
context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") &&
!context.getPortabilitySubsetFeatures().tessellationIsolines)
{
TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: Tessellation iso lines are not supported by this implementation");
}
}
+
+
+namespace vkt
+{
+namespace subgroups
+{
+typedef std::vector< de::SharedPtr<BufferOrImage> > vectorBufferOrImage;
+
+enum ShaderGroups
+{
+ FIRST_GROUP = 0,
+ RAYGEN_GROUP = FIRST_GROUP,
+ MISS_GROUP,
+ HIT_GROUP,
+ CALL_GROUP,
+ GROUP_COUNT
+};
+
+const std::vector<vk::VkFormat> getAllRayTracingFormats()
+{
+ std::vector<VkFormat> formats;
+
+ formats.push_back(VK_FORMAT_R8G8B8_SINT);
+ formats.push_back(VK_FORMAT_R8_UINT);
+ formats.push_back(VK_FORMAT_R8G8B8A8_UINT);
+ formats.push_back(VK_FORMAT_R16G16B16_SINT);
+ formats.push_back(VK_FORMAT_R16_UINT);
+ formats.push_back(VK_FORMAT_R16G16B16A16_UINT);
+ formats.push_back(VK_FORMAT_R32G32B32_SINT);
+ formats.push_back(VK_FORMAT_R32_UINT);
+ formats.push_back(VK_FORMAT_R32G32B32A32_UINT);
+ formats.push_back(VK_FORMAT_R64G64B64_SINT);
+ formats.push_back(VK_FORMAT_R64_UINT);
+ formats.push_back(VK_FORMAT_R64G64B64A64_UINT);
+ formats.push_back(VK_FORMAT_R16G16B16A16_SFLOAT);
+ formats.push_back(VK_FORMAT_R32_SFLOAT);
+ formats.push_back(VK_FORMAT_R32G32B32A32_SFLOAT);
+ formats.push_back(VK_FORMAT_R64_SFLOAT);
+ formats.push_back(VK_FORMAT_R64G64B64_SFLOAT);
+ formats.push_back(VK_FORMAT_R64G64B64A64_SFLOAT);
+ formats.push_back(VK_FORMAT_R8_USCALED);
+ formats.push_back(VK_FORMAT_R8G8_USCALED);
+ formats.push_back(VK_FORMAT_R8G8B8_USCALED);
+ formats.push_back(VK_FORMAT_R8G8B8A8_USCALED);
+
+ return formats;
+}
+
+void addRayTracingNoSubgroupShader (SourceCollections& programCollection)
+{
+ const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, 0u, true);
+
+ const std::string rgenShaderNoSubgroups =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ "layout(location = 0) rayPayloadEXT uvec4 payload;\n"
+ "layout(location = 0) callableDataEXT uvec4 callData;\n"
+ "layout(set = 1, binding = 0) uniform accelerationStructureEXT topLevelAS;\n"
+ "\n"
+ "void main()\n"
+ "{\n"
+ " uint rayFlags = 0;\n"
+ " uint cullMask = 0xFF;\n"
+ " float tmin = 0.0;\n"
+ " float tmax = 9.0;\n"
+ " vec3 origin = vec3((float(gl_LaunchIDEXT.x) + 0.5f) / float(gl_LaunchSizeEXT.x), (float(gl_LaunchIDEXT.y) + 0.5f) / float(gl_LaunchSizeEXT.y), 0.0);\n"
+ " vec3 directHit = vec3(0.0, 0.0, -1.0);\n"
+ " vec3 directMiss = vec3(0.0, 0.0, +1.0);\n"
+ "\n"
+ " traceRayEXT(topLevelAS, rayFlags, cullMask, 0, 0, 0, origin, tmin, directHit, tmax, 0);\n"
+ " traceRayEXT(topLevelAS, rayFlags, cullMask, 0, 0, 0, origin, tmin, directMiss, tmax, 0);\n"
+ " executeCallableEXT(0, 0);\n"
+ "}\n";
+ const std::string hitShaderNoSubgroups =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ "hitAttributeEXT vec3 attribs;\n"
+ "layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
+ "\n"
+ "void main()\n"
+ "{\n"
+ "}\n";
+ const std::string missShaderNoSubgroups =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ "layout(location = 0) rayPayloadInEXT vec3 hitValue;\n"
+ "\n"
+ "void main()\n"
+ "{\n"
+ "}\n";
+ const std::string sectShaderNoSubgroups =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ "hitAttributeEXT vec3 hitAttribute;\n"
+ "\n"
+ "void main()\n"
+ "{\n"
+ " reportIntersectionEXT(0.75f, gl_HitKindFrontFacingTriangleEXT);\n"
+ "}\n";
+ const std::string callShaderNoSubgroups =
+ "#version 460 core\n"
+ "#extension GL_EXT_ray_tracing: require\n"
+ "layout(location = 0) callableDataInEXT float callData;\n"
+ "\n"
+ "void main()\n"
+ "{\n"
+ "}\n";
+
+ programCollection.glslSources.add("rgen_noSubgroup") << glu::RaygenSource (rgenShaderNoSubgroups) << buildOptions;
+ programCollection.glslSources.add("ahit_noSubgroup") << glu::AnyHitSource (hitShaderNoSubgroups) << buildOptions;
+ programCollection.glslSources.add("chit_noSubgroup") << glu::ClosestHitSource (hitShaderNoSubgroups) << buildOptions;
+ programCollection.glslSources.add("miss_noSubgroup") << glu::MissSource (missShaderNoSubgroups) << buildOptions;
+ programCollection.glslSources.add("sect_noSubgroup") << glu::IntersectionSource (sectShaderNoSubgroups) << buildOptions;
+ programCollection.glslSources.add("call_noSubgroup") << glu::CallableSource (callShaderNoSubgroups) << buildOptions;
+}
+
+static vector<VkShaderStageFlagBits> enumerateRayTracingShaderStages (const VkShaderStageFlags shaderStage)
+{
+ vector<VkShaderStageFlagBits> result;
+ const VkShaderStageFlagBits shaderStageFlags[] =
+ {
+ VK_SHADER_STAGE_RAYGEN_BIT_KHR,
+ VK_SHADER_STAGE_ANY_HIT_BIT_KHR,
+ VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR,
+ VK_SHADER_STAGE_MISS_BIT_KHR,
+ VK_SHADER_STAGE_INTERSECTION_BIT_KHR,
+ VK_SHADER_STAGE_CALLABLE_BIT_KHR,
+ };
+
+ for (auto shaderStageFlag: shaderStageFlags)
+ {
+ if (0 != (shaderStage & shaderStageFlag))
+ result.push_back(shaderStageFlag);
+ }
+
+ return result;
+}
+
+static deUint32 getRayTracingResultBinding (const VkShaderStageFlagBits shaderStage)
+{
+ const VkShaderStageFlags shaderStageFlags[] =
+ {
+ VK_SHADER_STAGE_RAYGEN_BIT_KHR,
+ VK_SHADER_STAGE_ANY_HIT_BIT_KHR,
+ VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR,
+ VK_SHADER_STAGE_MISS_BIT_KHR,
+ VK_SHADER_STAGE_INTERSECTION_BIT_KHR,
+ VK_SHADER_STAGE_CALLABLE_BIT_KHR,
+ };
+
+ for (deUint32 shaderStageNdx = 0; shaderStageNdx < DE_LENGTH_OF_ARRAY(shaderStageFlags); ++shaderStageNdx)
+ {
+ if (0 != (shaderStage & shaderStageFlags[shaderStageNdx]))
+ {
+ DE_ASSERT(0 == (shaderStage & (~shaderStageFlags[shaderStageNdx])));
+
+ return shaderStageNdx;
+ }
+ }
+
+ TCU_THROW(InternalError, "Non-raytracing stage specified or no stage at all");
+}
+
+static vectorBufferOrImage makeRayTracingInputBuffers (Context& context,
+ VkFormat format,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const vector<VkShaderStageFlagBits>& stagesVector)
+{
+ const size_t stagesCount = stagesVector.size();
+ const VkDeviceSize shaderSize = getMaxWidth();
+ const VkDeviceSize inputBufferSize = getElementSizeInBytes(format, SSBOData::LayoutStd430) * shaderSize;
+ vectorBufferOrImage inputBuffers (stagesCount + extraDatasCount);
+
+ // The implicit result SSBO we use to store our outputs from the shader
+ for (size_t stageNdx = 0u; stageNdx < stagesCount; ++stageNdx)
+ inputBuffers[stageNdx] = de::SharedPtr<BufferOrImage>(new Buffer(context, inputBufferSize));
+
+ for (size_t stageNdx = stagesCount; stageNdx < stagesCount + extraDatasCount; ++stageNdx)
+ {
+ const size_t datasNdx = stageNdx - stagesCount;
+
+ if (extraDatas[datasNdx].isImage)
+ {
+ inputBuffers[stageNdx] = de::SharedPtr<BufferOrImage>(new Image(context, static_cast<deUint32>(extraDatas[datasNdx].numElements), 1, extraDatas[datasNdx].format));
+ }
+ else
+ {
+ const VkDeviceSize size = getElementSizeInBytes(extraDatas[datasNdx].format, extraDatas[datasNdx].layout) * extraDatas[datasNdx].numElements;
+
+ inputBuffers[stageNdx] = de::SharedPtr<BufferOrImage>(new Buffer(context, size));
+ }
+
+ initializeMemory(context, inputBuffers[stageNdx]->getAllocation(), extraDatas[datasNdx]);
+ }
+
+ return inputBuffers;
+}
+
+static Move<VkDescriptorSetLayout> makeRayTracingDescriptorSetLayout (Context& context,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const vector<VkShaderStageFlagBits>& stagesVector,
+ const vectorBufferOrImage& inputBuffers)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ const size_t stagesCount = stagesVector.size();
+ DescriptorSetLayoutBuilder layoutBuilder;
+
+ // The implicit result SSBO we use to store our outputs from the shader
+ for (size_t stageNdx = 0u; stageNdx < stagesVector.size(); ++stageNdx)
+ {
+ const deUint32 stageBinding = getRayTracingResultBinding(stagesVector[stageNdx]);
+
+ layoutBuilder.addIndexedBinding(inputBuffers[stageNdx]->getType(), 1, stagesVector[stageNdx], stageBinding, DE_NULL);
+ }
+
+ for (size_t stageNdx = stagesCount; stageNdx < stagesCount + extraDatasCount; ++stageNdx)
+ {
+ const size_t datasNdx = stageNdx - stagesCount;
+
+ layoutBuilder.addIndexedBinding(inputBuffers[stageNdx]->getType(), 1, extraDatas[datasNdx].stages, extraDatas[datasNdx].binding, DE_NULL);
+ }
+
+ return layoutBuilder.build(vkd, device);
+}
+
+static Move<VkDescriptorSetLayout> makeRayTracingDescriptorSetLayoutAS (Context& context)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ DescriptorSetLayoutBuilder layoutBuilder;
+
+ layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, VK_SHADER_STAGE_RAYGEN_BIT_KHR);
+
+ return layoutBuilder.build(vkd, device);
+}
+
+static Move<VkDescriptorPool> makeRayTracingDescriptorPool (Context& context,
+ const vectorBufferOrImage& inputBuffers)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ const deUint32 maxDescriptorSets = 2u;
+ DescriptorPoolBuilder poolBuilder;
+ Move<VkDescriptorPool> result;
+
+ if (inputBuffers.size() > 0)
+ {
+ for (size_t ndx = 0u; ndx < inputBuffers.size(); ndx++)
+ poolBuilder.addType(inputBuffers[ndx]->getType());
+ }
+
+ poolBuilder.addType(VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR);
+
+ result = poolBuilder.build(vkd, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, maxDescriptorSets);
+
+ return result;
+}
+
+static Move<VkDescriptorSet> makeRayTracingDescriptorSet (Context& context,
+ VkDescriptorPool descriptorPool,
+ VkDescriptorSetLayout descriptorSetLayout,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const vector<VkShaderStageFlagBits>& stagesVector,
+ const vectorBufferOrImage& inputBuffers)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ const size_t stagesCount = stagesVector.size();
+ Move<VkDescriptorSet> descriptorSet;
+
+ if (inputBuffers.size() > 0)
+ {
+ DescriptorSetUpdateBuilder updateBuilder;
+
+ // Create descriptor set
+ descriptorSet = makeDescriptorSet(vkd, device, descriptorPool, descriptorSetLayout);
+
+ for (size_t ndx = 0u; ndx < stagesCount + extraDatasCount; ndx++)
+ {
+ const deUint32 binding = (ndx < stagesCount)
+ ? getRayTracingResultBinding(stagesVector[ndx])
+ : extraDatas[ndx - stagesCount].binding;
+
+ if (inputBuffers[ndx]->isImage())
+ {
+ const VkDescriptorImageInfo info = makeDescriptorImageInfo(inputBuffers[ndx]->getAsImage()->getSampler(), inputBuffers[ndx]->getAsImage()->getImageView(), VK_IMAGE_LAYOUT_GENERAL);
+
+ updateBuilder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(binding), inputBuffers[ndx]->getType(), &info);
+ }
+ else
+ {
+ const VkDescriptorBufferInfo info = makeDescriptorBufferInfo(inputBuffers[ndx]->getAsBuffer()->getBuffer(), 0ull, inputBuffers[ndx]->getAsBuffer()->getSize());
+
+ updateBuilder.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(binding), inputBuffers[ndx]->getType(), &info);
+ }
+ }
+
+ updateBuilder.update(vkd, device);
+ }
+
+ return descriptorSet;
+}
+
+static Move<VkDescriptorSet> makeRayTracingDescriptorSetAS (Context& context,
+ VkDescriptorPool descriptorPool,
+ VkDescriptorSetLayout descriptorSetLayout,
+ de::MovePtr<TopLevelAccelerationStructure>& topLevelAccelerationStructure)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ const TopLevelAccelerationStructure* topLevelAccelerationStructurePtr = topLevelAccelerationStructure.get();
+ const VkWriteDescriptorSetAccelerationStructureKHR accelerationStructureWriteDescriptorSet =
+ {
+ VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_ACCELERATION_STRUCTURE_KHR, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 1u, // deUint32 accelerationStructureCount;
+ topLevelAccelerationStructurePtr->getPtr(), // const VkAccelerationStructureKHR* pAccelerationStructures;
+ };
+ Move<VkDescriptorSet> descriptorSet = makeDescriptorSet(vkd, device, descriptorPool, descriptorSetLayout);
+
+ DescriptorSetUpdateBuilder()
+ .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_KHR, &accelerationStructureWriteDescriptorSet)
+ .update(vkd, device);
+
+ return descriptorSet;
+}
+
+static Move<VkPipelineLayout> makeRayTracingPipelineLayout (Context& context,
+ const VkDescriptorSetLayout descriptorSetLayout0,
+ const VkDescriptorSetLayout descriptorSetLayout1)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ const std::vector<VkDescriptorSetLayout> descriptorSetLayouts { descriptorSetLayout0, descriptorSetLayout1 };
+ const deUint32 descriptorSetLayoutsSize = static_cast<deUint32>(descriptorSetLayouts.size());
+
+ return makePipelineLayout(vkd, device, descriptorSetLayoutsSize, descriptorSetLayouts.data());
+}
+
+static de::MovePtr<TopLevelAccelerationStructure> createTopAccelerationStructure (Context& context,
+ de::SharedPtr<BottomLevelAccelerationStructure> bottomLevelAccelerationStructure)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ Allocator& allocator = context.getDefaultAllocator();
+ de::MovePtr<TopLevelAccelerationStructure> result = makeTopLevelAccelerationStructure();
+
+ result->setInstanceCount(1);
+ result->addInstance(bottomLevelAccelerationStructure);
+ result->create(vkd, device, allocator);
+
+ return result;
+}
+
+static de::SharedPtr<BottomLevelAccelerationStructure> createBottomAccelerationStructure (Context& context)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ Allocator& allocator = context.getDefaultAllocator();
+ de::MovePtr<BottomLevelAccelerationStructure> result = makeBottomLevelAccelerationStructure();
+ const std::vector<tcu::Vec3> geometryData { tcu::Vec3(-1.0f, -1.0f, -2.0f), tcu::Vec3(+1.0f, +1.0f, -1.0f) };
+
+ result->setGeometryCount(1u);
+ result->addGeometry(geometryData, false);
+ result->create(vkd, device, allocator, 0u);
+
+ return de::SharedPtr<BottomLevelAccelerationStructure>(result.release());
+}
+
+static de::MovePtr<RayTracingPipeline> makeRayTracingPipeline (Context& context,
+ const VkShaderStageFlags shaderStageTested,
+ const VkPipelineLayout pipelineLayout,
+ const deUint32 shaderStageCreateFlags[6],
+ const deUint32 requiredSubgroupSize[6],
+ Move<VkPipeline>& pipelineOut)
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ BinaryCollection& collection = context.getBinaryCollection();
+ const char* shaderRgenName = (0 != (shaderStageTested & VK_SHADER_STAGE_RAYGEN_BIT_KHR)) ? "rgen" : "rgen_noSubgroup";
+ const char* shaderAhitName = (0 != (shaderStageTested & VK_SHADER_STAGE_ANY_HIT_BIT_KHR)) ? "ahit" : "ahit_noSubgroup";
+ const char* shaderChitName = (0 != (shaderStageTested & VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR)) ? "chit" : "chit_noSubgroup";
+ const char* shaderMissName = (0 != (shaderStageTested & VK_SHADER_STAGE_MISS_BIT_KHR)) ? "miss" : "miss_noSubgroup";
+ const char* shaderSectName = (0 != (shaderStageTested & VK_SHADER_STAGE_INTERSECTION_BIT_KHR)) ? "sect" : "sect_noSubgroup";
+ const char* shaderCallName = (0 != (shaderStageTested & VK_SHADER_STAGE_CALLABLE_BIT_KHR)) ? "call" : "call_noSubgroup";
+ const VkShaderModuleCreateFlags noShaderModuleCreateFlags = static_cast<VkShaderModuleCreateFlags>(0);
+ Move<VkShaderModule> rgenShaderModule = createShaderModule(vkd, device, collection.get(shaderRgenName), noShaderModuleCreateFlags);
+ Move<VkShaderModule> ahitShaderModule = createShaderModule(vkd, device, collection.get(shaderAhitName), noShaderModuleCreateFlags);
+ Move<VkShaderModule> chitShaderModule = createShaderModule(vkd, device, collection.get(shaderChitName), noShaderModuleCreateFlags);
+ Move<VkShaderModule> missShaderModule = createShaderModule(vkd, device, collection.get(shaderMissName), noShaderModuleCreateFlags);
+ Move<VkShaderModule> sectShaderModule = createShaderModule(vkd, device, collection.get(shaderSectName), noShaderModuleCreateFlags);
+ Move<VkShaderModule> callShaderModule = createShaderModule(vkd, device, collection.get(shaderCallName), noShaderModuleCreateFlags);
+ const VkPipelineShaderStageCreateFlags noPipelineShaderStageCreateFlags = static_cast<VkPipelineShaderStageCreateFlags>(0);
+ const VkPipelineShaderStageCreateFlags rgenPipelineShaderStageCreateFlags = (shaderStageCreateFlags == DE_NULL) ? noPipelineShaderStageCreateFlags : shaderStageCreateFlags[0];
+ const VkPipelineShaderStageCreateFlags ahitPipelineShaderStageCreateFlags = (shaderStageCreateFlags == DE_NULL) ? noPipelineShaderStageCreateFlags : shaderStageCreateFlags[1];
+ const VkPipelineShaderStageCreateFlags chitPipelineShaderStageCreateFlags = (shaderStageCreateFlags == DE_NULL) ? noPipelineShaderStageCreateFlags : shaderStageCreateFlags[2];
+ const VkPipelineShaderStageCreateFlags missPipelineShaderStageCreateFlags = (shaderStageCreateFlags == DE_NULL) ? noPipelineShaderStageCreateFlags : shaderStageCreateFlags[3];
+ const VkPipelineShaderStageCreateFlags sectPipelineShaderStageCreateFlags = (shaderStageCreateFlags == DE_NULL) ? noPipelineShaderStageCreateFlags : shaderStageCreateFlags[4];
+ const VkPipelineShaderStageCreateFlags callPipelineShaderStageCreateFlags = (shaderStageCreateFlags == DE_NULL) ? noPipelineShaderStageCreateFlags : shaderStageCreateFlags[5];
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT requiredSubgroupSizeCreateInfo[6] =
+ {
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[0] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[1] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[2] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[3] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[4] : 0u,
+ },
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO_EXT,
+ DE_NULL,
+ requiredSubgroupSize != DE_NULL ? requiredSubgroupSize[5] : 0u,
+ },
+ };
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* rgenRequiredSubgroupSizeCreateInfo = (requiredSubgroupSizeCreateInfo[0].requiredSubgroupSize == 0) ? DE_NULL : &requiredSubgroupSizeCreateInfo[0];
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* ahitRequiredSubgroupSizeCreateInfo = (requiredSubgroupSizeCreateInfo[1].requiredSubgroupSize == 0) ? DE_NULL : &requiredSubgroupSizeCreateInfo[1];
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* chitRequiredSubgroupSizeCreateInfo = (requiredSubgroupSizeCreateInfo[2].requiredSubgroupSize == 0) ? DE_NULL : &requiredSubgroupSizeCreateInfo[2];
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* missRequiredSubgroupSizeCreateInfo = (requiredSubgroupSizeCreateInfo[3].requiredSubgroupSize == 0) ? DE_NULL : &requiredSubgroupSizeCreateInfo[3];
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* sectRequiredSubgroupSizeCreateInfo = (requiredSubgroupSizeCreateInfo[4].requiredSubgroupSize == 0) ? DE_NULL : &requiredSubgroupSizeCreateInfo[4];
+ const VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT* callRequiredSubgroupSizeCreateInfo = (requiredSubgroupSizeCreateInfo[5].requiredSubgroupSize == 0) ? DE_NULL : &requiredSubgroupSizeCreateInfo[5];
+ de::MovePtr<RayTracingPipeline> rayTracingPipeline = de::newMovePtr<RayTracingPipeline>();
+
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_RAYGEN_BIT_KHR , rgenShaderModule, RAYGEN_GROUP, DE_NULL, rgenPipelineShaderStageCreateFlags, rgenRequiredSubgroupSizeCreateInfo);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_ANY_HIT_BIT_KHR , ahitShaderModule, HIT_GROUP, DE_NULL, ahitPipelineShaderStageCreateFlags, ahitRequiredSubgroupSizeCreateInfo);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR , chitShaderModule, HIT_GROUP, DE_NULL, chitPipelineShaderStageCreateFlags, chitRequiredSubgroupSizeCreateInfo);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_MISS_BIT_KHR , missShaderModule, MISS_GROUP, DE_NULL, missPipelineShaderStageCreateFlags, missRequiredSubgroupSizeCreateInfo);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_INTERSECTION_BIT_KHR , sectShaderModule, HIT_GROUP, DE_NULL, sectPipelineShaderStageCreateFlags, sectRequiredSubgroupSizeCreateInfo);
+ rayTracingPipeline->addShader(VK_SHADER_STAGE_CALLABLE_BIT_KHR , callShaderModule, CALL_GROUP, DE_NULL, callPipelineShaderStageCreateFlags, callRequiredSubgroupSizeCreateInfo);
+
+	// Must call createPipeline here, because the pNext chains passed to addShader point to locals
+ pipelineOut = rayTracingPipeline->createPipeline(vkd, device, pipelineLayout);
+
+ return rayTracingPipeline;
+}
+
+VkShaderStageFlags getPossibleRayTracingSubgroupStages (Context& context, const VkShaderStageFlags testedStages)
+{
+ const VkPhysicalDeviceSubgroupProperties& subgroupProperties = context.getSubgroupProperties();
+ const VkShaderStageFlags stages = testedStages & subgroupProperties.supportedStages;
+
+ DE_ASSERT(isAllRayTracingStages(testedStages));
+
+ return stages;
+}
+
+tcu::TestStatus allRayTracingStages (Context& context,
+ VkFormat format,
+ const SSBOData* extraDatas,
+ deUint32 extraDataCount,
+ const void* internalData,
+ const VerificationFunctor& checkResult,
+ const VkShaderStageFlags shaderStage)
+{
+ return vkt::subgroups::allRayTracingStagesRequiredSubgroupSize(context,
+ format,
+ extraDatas,
+ extraDataCount,
+ internalData,
+ checkResult,
+ shaderStage,
+ DE_NULL,
+ DE_NULL);
+}
+
+tcu::TestStatus allRayTracingStagesRequiredSubgroupSize (Context& context,
+ VkFormat format,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const void* internalData,
+ const VerificationFunctor& checkResult,
+ const VkShaderStageFlags shaderStageTested,
+ const deUint32 shaderStageCreateFlags[6],
+ const deUint32 requiredSubgroupSize[6])
+{
+ const DeviceInterface& vkd = context.getDeviceInterface();
+ const VkDevice device = context.getDevice();
+ const VkQueue queue = context.getUniversalQueue();
+ const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
+ Allocator& allocator = context.getDefaultAllocator();
+ const deUint32 subgroupSize = getSubgroupSize(context);
+ const deUint32 maxWidth = getMaxWidth();
+ const vector<VkShaderStageFlagBits> stagesVector = enumerateRayTracingShaderStages(shaderStageTested);
+ const deUint32 stagesCount = static_cast<deUint32>(stagesVector.size());
+ de::SharedPtr<BottomLevelAccelerationStructure> bottomLevelAccelerationStructure = createBottomAccelerationStructure(context);
+ de::MovePtr<TopLevelAccelerationStructure> topLevelAccelerationStructure = createTopAccelerationStructure(context, bottomLevelAccelerationStructure);
+ vectorBufferOrImage inputBuffers = makeRayTracingInputBuffers(context, format, extraDatas, extraDatasCount, stagesVector);
+ const Move<VkDescriptorSetLayout> descriptorSetLayout = makeRayTracingDescriptorSetLayout(context, extraDatas, extraDatasCount, stagesVector, inputBuffers);
+ const Move<VkDescriptorSetLayout> descriptorSetLayoutAS = makeRayTracingDescriptorSetLayoutAS(context);
+ const Move<VkPipelineLayout> pipelineLayout = makeRayTracingPipelineLayout(context, *descriptorSetLayout, *descriptorSetLayoutAS);
+ Move<VkPipeline> pipeline = Move<VkPipeline>();
+ const de::MovePtr<RayTracingPipeline> rayTracingPipeline = makeRayTracingPipeline(context, shaderStageTested, *pipelineLayout, shaderStageCreateFlags, requiredSubgroupSize, pipeline);
+ const deUint32 shaderGroupHandleSize = context.getRayTracingPipelineProperties().shaderGroupHandleSize;
+ const deUint32 shaderGroupBaseAlignment = context.getRayTracingPipelineProperties().shaderGroupBaseAlignment;
+ de::MovePtr<BufferWithMemory> rgenShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, RAYGEN_GROUP, 1u);
+ de::MovePtr<BufferWithMemory> missShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, MISS_GROUP, 1u);
+ de::MovePtr<BufferWithMemory> hitsShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, HIT_GROUP, 1u);
+ de::MovePtr<BufferWithMemory> callShaderBindingTable = rayTracingPipeline->createShaderBindingTable(vkd, device, *pipeline, allocator, shaderGroupHandleSize, shaderGroupBaseAlignment, CALL_GROUP, 1u);
+ const VkStridedDeviceAddressRegionKHR rgenShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, rgenShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
+ const VkStridedDeviceAddressRegionKHR missShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, missShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
+ const VkStridedDeviceAddressRegionKHR hitsShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, hitsShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
+ const VkStridedDeviceAddressRegionKHR callShaderBindingTableRegion = makeStridedDeviceAddressRegionKHR(getBufferDeviceAddress(vkd, device, callShaderBindingTable->get(), 0), shaderGroupHandleSize, shaderGroupHandleSize);
+ const Move<VkDescriptorPool> descriptorPool = makeRayTracingDescriptorPool(context, inputBuffers);
+ const Move<VkDescriptorSet> descriptorSet = makeRayTracingDescriptorSet(context, *descriptorPool, *descriptorSetLayout, extraDatas, extraDatasCount, stagesVector, inputBuffers);
+ const Move<VkDescriptorSet> descriptorSetAS = makeRayTracingDescriptorSetAS(context, *descriptorPool, *descriptorSetLayoutAS, topLevelAccelerationStructure);
+ const Move<VkCommandPool> cmdPool = makeCommandPool(vkd, device, queueFamilyIndex);
+ const Move<VkCommandBuffer> cmdBuffer = makeCommandBuffer(context, *cmdPool);
+ deUint32 passIterations = 0u;
+ deUint32 failIterations = 0u;
+
+ DE_ASSERT(shaderStageTested != 0);
+
+ for (deUint32 width = 1u; width < maxWidth; width = getNextWidth(width))
+ {
+
+ for (deUint32 ndx = stagesCount; ndx < stagesCount + extraDatasCount; ++ndx)
+ {
+ // re-init the data
+ const Allocation& alloc = inputBuffers[ndx]->getAllocation();
+
+ initializeMemory(context, alloc, extraDatas[ndx - stagesCount]);
+ }
+
+ beginCommandBuffer(vkd, *cmdBuffer);
+ {
+ vkd.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, *pipeline);
+
+ bottomLevelAccelerationStructure->build(vkd, device, *cmdBuffer);
+ topLevelAccelerationStructure->build(vkd, device, *cmdBuffer);
+
+ vkd.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, *pipelineLayout, 1u, 1u, &descriptorSetAS.get(), 0u, DE_NULL);
+
+ if (stagesCount + extraDatasCount > 0)
+ vkd.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_RAY_TRACING_KHR, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);
+
+ cmdTraceRays(vkd,
+ *cmdBuffer,
+ &rgenShaderBindingTableRegion,
+ &missShaderBindingTableRegion,
+ &hitsShaderBindingTableRegion,
+ &callShaderBindingTableRegion,
+ width, 1, 1);
+
+ const VkMemoryBarrier postTraceMemoryBarrier = makeMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT);
+ cmdPipelineMemoryBarrier(vkd, *cmdBuffer, VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_KHR, VK_PIPELINE_STAGE_HOST_BIT, &postTraceMemoryBarrier);
+ }
+ endCommandBuffer(vkd, *cmdBuffer);
+
+ submitCommandsAndWait(vkd, device, queue, *cmdBuffer);
+
+ for (deUint32 ndx = 0u; ndx < stagesCount; ++ndx)
+ {
+ std::vector<const void*> datas;
+
+ if (!inputBuffers[ndx]->isImage())
+ {
+ const Allocation& resultAlloc = inputBuffers[ndx]->getAllocation();
+
+ invalidateAlloc(vkd, device, resultAlloc);
+
+ // we always have our result data first
+ datas.push_back(resultAlloc.getHostPtr());
+ }
+
+ for (deUint32 index = stagesCount; index < stagesCount + extraDatasCount; ++index)
+ {
+ const deUint32 datasNdx = index - stagesCount;
+
+ if ((stagesVector[ndx] & extraDatas[datasNdx].stages) && (!inputBuffers[index]->isImage()))
+ {
+ const Allocation& resultAlloc = inputBuffers[index]->getAllocation();
+
+ invalidateAlloc(vkd, device, resultAlloc);
+
+					// followed by any extra data buffers referenced by this stage
+ datas.push_back(resultAlloc.getHostPtr());
+ }
+ }
+
+ if (!checkResult(internalData, datas, width, subgroupSize, false))
+ failIterations++;
+ else
+ passIterations++;
+ }
+
+ vkd.resetCommandBuffer(*cmdBuffer, 0);
+ }
+
+ if (failIterations > 0 || passIterations == 0)
+ return tcu::TestStatus::fail("Failed " + de::toString(failIterations) + " out of " + de::toString(failIterations + passIterations) + " iterations.");
+ else
+ return tcu::TestStatus::pass("OK");
+}
+} // namespace subgroups
+} // namespace vkt
#include "vkTypeUtil.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
+#include "vkRayTracingUtil.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuTestLog.hpp"
{
namespace subgroups
{
+typedef bool (*CheckResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize);
+typedef bool (*CheckResultFragment)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 height, deUint32 subgroupSize);
+typedef bool (*CheckResultCompute)(const void* internalData, std::vector<const void*> datas, const deUint32 numWorkgroups[3], const deUint32 localSize[3], deUint32 subgroupSize);
+
// A struct to represent input data to a shader
struct SSBOData
{
- SSBOData() :
- initializeType (InitializeNone),
- layout (LayoutStd140),
- format (vk::VK_FORMAT_UNDEFINED),
- numElements (0),
- isImage (false),
- binding (0u),
- stages ((vk::VkShaderStageFlagBits)0u)
- {}
-
enum InputDataInitializeType
{
InitializeNone = 0,
InitializeNonZero,
InitializeZero,
- } initializeType;
-
+ };
enum InputDataLayoutType
{
LayoutStd140 = 0,
LayoutStd430,
LayoutPacked
- } layout;
+ };
+
+ SSBOData() :
+ initializeType (InitializeNone),
+ layout (LayoutStd140),
+ format (vk::VK_FORMAT_UNDEFINED),
+ numElements (0),
+ isImage (false),
+ binding (0u),
+ stages ((vk::VkShaderStageFlags)0u)
+ {}
+
+ SSBOData (InputDataInitializeType initializeType_,
+ InputDataLayoutType layout_,
+ vk::VkFormat format_,
+ vk::VkDeviceSize numElements_,
+ bool isImage_ = false,
+ deUint32 binding_ = 0u,
+ vk::VkShaderStageFlags stages_ = static_cast<vk::VkShaderStageFlags>(0u))
+ : initializeType (initializeType_)
+ , layout (layout_)
+ , format (format_)
+ , numElements (numElements_)
+ , isImage (isImage_)
+ , binding (binding_)
+ , stages (stages_)
+ {}
+ InputDataInitializeType initializeType;
+ InputDataLayoutType layout;
vk::VkFormat format;
vk::VkDeviceSize numElements;
bool isImage;
deUint32 binding;
- vk::VkShaderStageFlagBits stages;
+ vk::VkShaderStageFlags stages;
};
-std::string getSharedMemoryBallotHelper();
-
-std::string getSharedMemoryBallotHelperARB();
+deUint32 getStagesCount (vk::VkShaderStageFlags shaderStages);
-deUint32 getSubgroupSize(Context& context);
+std::string getSharedMemoryBallotHelper ();
-vk::VkDeviceSize maxSupportedSubgroupSize();
+std::string getSharedMemoryBallotHelperARB ();
-std::string getShaderStageName(vk::VkShaderStageFlags stage);
+deUint32 getSubgroupSize (Context& context);
-std::string getSubgroupFeatureName(vk::VkSubgroupFeatureFlagBits bit);
+deUint32 maxSupportedSubgroupSize ();
-void addNoSubgroupShader (vk::SourceCollections& programCollection);
+std::string getShaderStageName (vk::VkShaderStageFlags stage);
-std::string getVertShaderForStage(vk::VkShaderStageFlags stage);//TODO
+std::string getSubgroupFeatureName (vk::VkSubgroupFeatureFlagBits bit);
-void initStdFrameBufferPrograms( vk::SourceCollections& programCollection,
- const vk::ShaderBuildOptions& buildOptions,
- vk::VkShaderStageFlags shaderStage,
- vk::VkFormat format,
- bool gsPointSize,
- std::string extHeader,
- std::string testSrc,
- std::string helperStr);
+void addNoSubgroupShader (vk::SourceCollections& programCollection);
-void initStdPrograms( vk::SourceCollections& programCollection,
- const vk::ShaderBuildOptions& buildOptions,
- vk::VkShaderStageFlags shaderStage,
- vk::VkFormat format,
- bool gsPointSize,
- std::string extHeader,
- std::string testSrc,
- std::string helperStr);
+void initStdFrameBufferPrograms (vk::SourceCollections& programCollection,
+ const vk::ShaderBuildOptions& buildOptions,
+ vk::VkShaderStageFlags shaderStage,
+ vk::VkFormat format,
+ bool gsPointSize,
+ const std::string& extHeader,
+ const std::string& testSrc,
+ const std::string& helperStr,
+ const std::vector<std::string>& declarations = std::vector<std::string>());
-bool isSubgroupSupported(Context& context);
+void initStdPrograms (vk::SourceCollections& programCollection,
+ const vk::ShaderBuildOptions& buildOptions,
+ vk::VkShaderStageFlags shaderStage,
+ vk::VkFormat format,
+ bool gsPointSize,
+ const std::string& extHeader,
+ const std::string& testSrc,
+ const std::string& helperStr,
+ const std::vector<std::string>& declarations = std::vector<std::string>(),
+ const bool avoidHelperInvocations = false,
+ const std::string& tempRes = " uint tempRes;\n");
-bool areSubgroupOperationsSupportedForStage(
- Context& context, vk::VkShaderStageFlags stage);
+bool isSubgroupSupported (Context& context);
-bool areSubgroupOperationsRequiredForStage(vk::VkShaderStageFlags stage);
+bool areSubgroupOperationsSupportedForStage (Context& context, vk::VkShaderStageFlags stage);
-bool isSubgroupFeatureSupportedForDevice(Context& context, vk::VkSubgroupFeatureFlagBits bit);
+bool isSubgroupFeatureSupportedForDevice (Context& context, vk::VkSubgroupFeatureFlagBits bit);
-bool isFragmentSSBOSupportedForDevice(Context& context);
+bool isFragmentSSBOSupportedForDevice (Context& context);
-bool isVertexSSBOSupportedForDevice(Context& context);
+bool isVertexSSBOSupportedForDevice (Context& context);
-bool isFormatSupportedForDevice(Context& context, vk::VkFormat format);
+bool isFormatSupportedForDevice (Context& context, vk::VkFormat format);
-bool isInt64SupportedForDevice(Context& context);
+bool isInt64SupportedForDevice (Context& context);
-bool isTessellationAndGeometryPointSizeSupported(Context& context);
+bool isTessellationAndGeometryPointSizeSupported (Context& context);
-bool is16BitUBOStorageSupported(Context& context);
+bool is16BitUBOStorageSupported (Context& context);
-bool is8BitUBOStorageSupported(Context& context);
+bool is8BitUBOStorageSupported (Context& context);
-bool isSubgroupBroadcastDynamicIdSupported(Context& context);
+bool isSubgroupBroadcastDynamicIdSupported (Context& context);
std::string getFormatNameForGLSL (vk::VkFormat format);
bool isFormatUnsigned (vk::VkFormat format);
bool isFormatFloat (vk::VkFormat format);
bool isFormatBool (vk::VkFormat format);
-bool isFormat8bitTy(vk::VkFormat format);
-bool isFormat16BitTy(vk::VkFormat format);
+bool isFormat8bitTy (vk::VkFormat format);
+bool isFormat16BitTy (vk::VkFormat format);
void addGeometryShadersFromTemplate (const std::string& glslTemplate, const vk::ShaderBuildOptions& options, vk::GlslSourceCollection& collection);
void addGeometryShadersFromTemplate (const std::string& spirvTemplate, const vk::SpirVAsmBuildOptions& options, vk::SpirVAsmCollection& collection);
void setTesEvalShaderFrameBuffer (vk::SourceCollections& programCollection);
-bool check(std::vector<const void*> datas,
- deUint32 width, deUint32 ref);
+bool check (std::vector<const void*> datas, deUint32 width, deUint32 ref);
-bool checkCompute(std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32 ref);
+bool checkCompute (std::vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32 ref);
-tcu::TestStatus makeTessellationEvaluationFrameBufferTest(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize),
- const vk::VkShaderStageFlags shaderStage = vk::VK_SHADER_STAGE_ALL_GRAPHICS);
+tcu::TestStatus makeTessellationEvaluationFrameBufferTest (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ CheckResult checkResult,
+ const vk::VkShaderStageFlags shaderStage = vk::VK_SHADER_STAGE_ALL_GRAPHICS);
-tcu::TestStatus makeGeometryFrameBufferTest(Context& context, vk::VkFormat format, SSBOData* extraData,
- deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize));
+tcu::TestStatus makeGeometryFrameBufferTest (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ CheckResult checkResult);
// Allows using verification functions with or without the optional last boolean argument.
// If using a function that does not need the last argument, it will not be passed down to it.
AllArgsVariant m_allArgsFunc;
};
-tcu::TestStatus allStages(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- const VerificationFunctor& checkResult,
- const vk::VkShaderStageFlags shaderStage);
-
-tcu::TestStatus makeVertexFrameBufferTest(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize));
-
-tcu::TestStatus makeFragmentFrameBufferTest(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width,
- deUint32 height, deUint32 subgroupSize));
-
-tcu::TestStatus makeComputeTest(
- Context& context, vk::VkFormat format, SSBOData* inputs,
- deUint32 inputsCount,const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32 subgroupSize),
- deUint32 requiredSubgroupSize = 0u, const deUint32 pipelineShaderStageCreateFlags = 0u);
+vk::VkShaderStageFlags getPossibleGraphicsSubgroupStages (Context& context, const vk::VkShaderStageFlags testedStages);
+
+tcu::TestStatus allStages (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ const VerificationFunctor& checkResult,
+ const vk::VkShaderStageFlags shaderStage);
+
+tcu::TestStatus makeVertexFrameBufferTest (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ CheckResult checkResult);
+
+tcu::TestStatus makeFragmentFrameBufferTest (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ CheckResultFragment checkResult);
+
+tcu::TestStatus makeComputeTest (Context& context,
+ vk::VkFormat format,
+ const SSBOData* inputs,
+ deUint32 inputsCount,
+ const void* internalData,
+ CheckResultCompute checkResult,
+ deUint32 requiredSubgroupSize = 0u,
+ const deUint32 pipelineShaderStageCreateFlags = 0u);
/* Functions needed for VK_EXT_subgroup_size_control tests */
-tcu::TestStatus makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize),
- const vk::VkShaderStageFlags shaderStage = vk::VK_SHADER_STAGE_ALL_GRAPHICS,
- const deUint32 tessShaderStageCreateFlags = 0u, const deUint32 requiredSubgroupSize = 0u);
-
-tcu::TestStatus makeGeometryFrameBufferTestRequiredSubgroupSize(Context& context, vk::VkFormat format, SSBOData* extraData,
- deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize),
- const deUint32 geometryShaderStageCreateFlags = 0u, const deUint32 requiredSubgroupSize = 0u);
-
-tcu::TestStatus allStagesRequiredSubgroupSize(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- const VerificationFunctor& checkResult,
- const vk::VkShaderStageFlags shaderStage,
- const deUint32 vertexShaderStageCreateFlags,
- const deUint32 tessellationControlShaderStageCreateFlags,
- const deUint32 tessellationEvalShaderStageCreateFlags,
- const deUint32 geometryShaderStageCreateFlags,
- const deUint32 fragmentShaderStageCreateFlags,
- const deUint32 requiredSubgroupSize[5]);
-
-tcu::TestStatus makeVertexFrameBufferTestRequiredSubgroupSize(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width, deUint32 subgroupSize),
- const deUint32 vertexShaderStageCreateFlags = 0u,
- const deUint32 requiredSubgroupSize = 0u);
-
-tcu::TestStatus makeFragmentFrameBufferTestRequiredSubgroupSize(Context& context, vk::VkFormat format,
- SSBOData* extraData, deUint32 extraDataCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas, deUint32 width,
- deUint32 height, deUint32 subgroupSize),
- const deUint32 fragmentShaderStageCreateFlags = 0u, const deUint32 requiredSubgroupSize = 0u);
-
-tcu::TestStatus makeComputeTestRequiredSubgroupSize(
- Context& context, vk::VkFormat format, SSBOData* inputs, deUint32 inputsCount, const void* internalData,
- bool (*checkResult)(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32 subgroupSize),
- const deUint32 pipelineShaderStageCreateFlags, const deUint32 numWorkgroups[3],
- const deBool isRequiredSubgroupSize, const deUint32 subgroupSize, const deUint32 localSizesToTest[][3], const deUint32 localSizesToTestCount);
-
-void supportedCheckShader(Context& context, const vk::VkShaderStageFlags shaderStage);
+tcu::TestStatus makeTessellationEvaluationFrameBufferTestRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ CheckResult checkResult,
+ const vk::VkShaderStageFlags shaderStage = vk::VK_SHADER_STAGE_ALL_GRAPHICS,
+ const deUint32 tessShaderStageCreateFlags = 0u,
+ const deUint32 requiredSubgroupSize = 0u);
+
+tcu::TestStatus makeGeometryFrameBufferTestRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ CheckResult checkResult,
+ const deUint32 geometryShaderStageCreateFlags = 0u,
+ const deUint32 requiredSubgroupSize = 0u);
+
+tcu::TestStatus allStagesRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const void* internalData,
+ const VerificationFunctor& checkResult,
+ const vk::VkShaderStageFlags shaderStageTested,
+ const deUint32 vertexShaderStageCreateFlags,
+ const deUint32 tessellationControlShaderStageCreateFlags,
+ const deUint32 tessellationEvalShaderStageCreateFlags,
+ const deUint32 geometryShaderStageCreateFlags,
+ const deUint32 fragmentShaderStageCreateFlags,
+ const deUint32 requiredSubgroupSize[5]);
+
+tcu::TestStatus makeVertexFrameBufferTestRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ CheckResult checkResult,
+ const deUint32 vertexShaderStageCreateFlags = 0u,
+ const deUint32 requiredSubgroupSize = 0u);
+
+tcu::TestStatus makeFragmentFrameBufferTestRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ CheckResultFragment checkResult,
+ const deUint32 fragmentShaderStageCreateFlags = 0u,
+ const deUint32 requiredSubgroupSize = 0u);
+
+tcu::TestStatus makeComputeTestRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* inputs,
+ deUint32 inputsCount,
+ const void* internalData,
+ CheckResultCompute checkResult,
+ const deUint32 pipelineShaderStageCreateFlags,
+ const deUint32 numWorkgroups[3],
+ const deBool isRequiredSubgroupSize,
+ const deUint32 subgroupSize,
+ const deUint32 localSizesToTest[][3],
+ const deUint32 localSizesToTestCount);
+
+void supportedCheckShader (Context& context, const vk::VkShaderStageFlags shaderStage);
+
+const std::vector<vk::VkFormat> getAllRayTracingFormats();
+
+void addRayTracingNoSubgroupShader (vk::SourceCollections& programCollection);
+
+vk::VkShaderStageFlags getPossibleRayTracingSubgroupStages (Context& context, const vk::VkShaderStageFlags testedStages);
+
+tcu::TestStatus allRayTracingStages (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraData,
+ deUint32 extraDataCount,
+ const void* internalData,
+ const VerificationFunctor& checkResult,
+ const vk::VkShaderStageFlags shaderStage);
+
+tcu::TestStatus allRayTracingStagesRequiredSubgroupSize (Context& context,
+ vk::VkFormat format,
+ const SSBOData* extraDatas,
+ deUint32 extraDatasCount,
+ const void* internalData,
+ const VerificationFunctor& checkResult,
+ const vk::VkShaderStageFlags shaderStageTested,
+ const deUint32 shaderStageCreateFlags[6],
+ const deUint32 requiredSubgroupSize[6]);
} // subgroups
} // vkt
{
enum OpType
{
- OPTYPE_ALL = 0,
- OPTYPE_ANY = 1,
- OPTYPE_ALLEQUAL = 2,
- OPTYPE_LAST_NON_ARB = 3,
- OPTYPE_ALL_ARB = 4,
- OPTYPE_ANY_ARB = 5,
- OPTYPE_ALLEQUAL_ARB = 6,
+ OPTYPE_ALL = 0,
+ OPTYPE_ANY = 1,
+ OPTYPE_ALLEQUAL = 2,
+ OPTYPE_LAST_NON_ARB = 3,
+ OPTYPE_ALL_ARB = 4,
+ OPTYPE_ANY_ARB = 5,
+ OPTYPE_ALLEQUAL_ARB = 6,
OPTYPE_LAST
};
-static bool checkVertexPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32)
+struct CaseDefinition
+{
+ OpType opType;
+ VkShaderStageFlags shaderStage;
+ VkFormat format;
+ de::SharedPtr<bool> geometryPointSizeSupported;
+ deBool requiredSubgroupSize;
+ deBool requires8BitUniformBuffer;
+ deBool requires16BitUniformBuffer;
+};
+
+static bool checkVertexPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::check(datas, width, 0x1F);
+
+ return subgroups::check(datas, width, 0x1F);
}
-static bool checkFragmentPipelineStages(const void* internalData, std::vector<const void*> datas,
- deUint32 width, deUint32 height, deUint32)
+static bool checkFragmentPipelineStages (const void* internalData,
+ vector<const void*> datas,
+ deUint32 width,
+ deUint32 height,
+ deUint32)
{
DE_UNREF(internalData);
- const deUint32* data =
- reinterpret_cast<const deUint32*>(datas[0]);
+
+ const deUint32* data = reinterpret_cast<const deUint32*>(datas[0]);
+
for (deUint32 x = 0u; x < width; ++x)
{
for (deUint32 y = 0u; y < height; ++y)
{
const deUint32 ndx = (x * height + y);
- deUint32 val = data[ndx] & 0x1F;
+ const deUint32 val = data[ndx] & 0x1F;
if (data[ndx] & 0x40) //Helper fragment shader invocation was executed
{
}
}
}
+
return true;
}
-static bool checkCompute(const void* internalData, std::vector<const void*> datas,
- const deUint32 numWorkgroups[3], const deUint32 localSize[3],
- deUint32)
+static bool checkCompute (const void* internalData,
+ vector<const void*> datas,
+ const deUint32 numWorkgroups[3],
+ const deUint32 localSize[3],
+ deUint32)
{
DE_UNREF(internalData);
- return vkt::subgroups::checkCompute(datas, numWorkgroups, localSize, 0x1F);
+
+ return subgroups::checkCompute(datas, numWorkgroups, localSize, 0x1F);
}
-std::string getOpTypeName(int opType)
+string getOpTypeName (int opType)
{
switch (opType)
{
- default:
- DE_FATAL("Unsupported op type");
- return "";
- case OPTYPE_ALL:
- return "subgroupAll";
- case OPTYPE_ANY:
- return "subgroupAny";
- case OPTYPE_ALLEQUAL:
- return "subgroupAllEqual";
- case OPTYPE_ALL_ARB:
- return "allInvocationsARB";
- case OPTYPE_ANY_ARB:
- return "anyInvocationARB";
- case OPTYPE_ALLEQUAL_ARB:
- return "allInvocationsEqualARB";
+ case OPTYPE_ALL: return "subgroupAll";
+ case OPTYPE_ANY: return "subgroupAny";
+ case OPTYPE_ALLEQUAL: return "subgroupAllEqual";
+ case OPTYPE_ALL_ARB: return "allInvocationsARB";
+ case OPTYPE_ANY_ARB: return "anyInvocationARB";
+ case OPTYPE_ALLEQUAL_ARB: return "allInvocationsEqualARB";
+ default: TCU_THROW(InternalError, "Unsupported op type");
}
}
-struct CaseDefinition
-{
- int opType;
- VkShaderStageFlags shaderStage;
- VkFormat format;
- de::SharedPtr<bool> geometryPointSizeSupported;
- deBool requiredSubgroupSize;
- deBool requires8BitUniformBuffer;
- deBool requires16BitUniformBuffer;
-};
-
-bool fmtIsBoolean(VkFormat format)
+bool fmtIsBoolean (VkFormat format)
{
// For reasons unknown, the tests use R8_USCALED as the boolean format
return format == VK_FORMAT_R8_USCALED || format == VK_FORMAT_R8G8_USCALED ||
format == VK_FORMAT_R8G8B8_USCALED || format == VK_FORMAT_R8G8B8A8_USCALED;
}
-const string extHeader(bool arbFunctions)
+const string getExtensions (bool arbFunctions)
{
return arbFunctions ? "#extension GL_ARB_shader_group_vote: enable\n"
"#extension GL_KHR_shader_subgroup_basic: enable\n"
: "#extension GL_KHR_shader_subgroup_vote: enable\n";
}
-// The test source to use in a generic stage. Fragment and compute sources are different
-const string stageTestSource(CaseDefinition caseDef)
+const string getStageTestSource (const CaseDefinition& caseDef)
{
- const bool formatIsBoolean = fmtIsBoolean(caseDef.format);
-
- const string op = getOpTypeName(caseDef.opType);
- const string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
+ const bool formatIsBoolean = fmtIsBoolean(caseDef.format);
+ const string op = getOpTypeName(caseDef.opType);
+ const string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
+ const string computePart = isAllComputeStages(caseDef.shaderStage)
+ ? op + "(data[gl_SubgroupInvocationID] > 0) ? 0x4 : 0x0"
+ : "0x4";
return
(OPTYPE_ALL == caseDef.opType || OPTYPE_ALL_ARB == caseDef.opType) ?
- " result = " + op + "(true) ? 0x1 : 0;\n"
- " result |= " + op + "(false) ? 0 : 0x1A;\n"
- " result |= 0x4;\n"
+ " tempRes = " + op + "(true) ? 0x1 : 0;\n"
+ " tempRes |= " + op + "(false) ? 0 : 0x1A;\n"
+ " tempRes |= " + computePart + ";\n"
: (OPTYPE_ANY == caseDef.opType || OPTYPE_ANY_ARB == caseDef.opType) ?
- " result = " + op + "(true) ? 0x1 : 0;\n"
- " result |= " + op + "(false) ? 0 : 0x1A;\n"
- " result |= 0x4;\n"
+ " tempRes = " + op + "(true) ? 0x1 : 0;\n"
+ " tempRes |= " + op + "(false) ? 0 : 0x1A;\n"
+ " tempRes |= " + computePart + ";\n"
: (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType) ?
" " + fmt + " valueEqual = " + fmt + "(1.25 * float(data[gl_SubgroupInvocationID]) + 5.0);\n" +
" " + fmt + " valueNoEqual = " + fmt + (formatIsBoolean ? "(subgroupElect());\n" : "(gl_SubgroupInvocationID);\n") +
- " result = " + op + "(" + fmt + "(1)) ? 0x1 : 0;\n"
- " result |= "
+ " tempRes = " + op + "(" + fmt + "(1)) ? 0x1 : 0;\n"
+ " tempRes |= "
+ (formatIsBoolean ? "0x2" : op + "(" + fmt + "(gl_SubgroupInvocationID)) ? 0 : 0x2")
+ ";\n"
- " result |= " + op + "(data[0]) ? 0x4 : 0;\n"
- " result |= " + op + "(valueEqual) ? 0x8 : 0x0;\n"
- " result |= " + op + "(valueNoEqual) ? 0x0 : 0x10;\n"
- " if (subgroupElect()) result |= 0x2 | 0x10;\n"
+ " tempRes |= " + op + "(data[0]) ? 0x4 : 0;\n"
+ " tempRes |= " + op + "(valueEqual) ? 0x8 : 0x0;\n"
+ " tempRes |= " + op + "(valueNoEqual) ? 0x0 : 0x10;\n"
+ " if (subgroupElect()) tempRes |= 0x2 | 0x10;\n"
: "";
}
void initFrameBufferPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const vk::ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- const bool formatIsBoolean = fmtIsBoolean(caseDef.format);
- const bool arbFunctions = caseDef.opType > OPTYPE_LAST_NON_ARB;
- const string extensionHeader = extHeader(arbFunctions);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != caseDef.shaderStage)
- subgroups::setFragmentShaderFrameBuffer(programCollection);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT == caseDef.shaderStage)
- {
- const string vertex = "#version 450\n"
- "void main (void)\n"
- "{\n"
- " vec2 uv = vec2(float(gl_VertexIndex & 1), float((gl_VertexIndex >> 1) & 1));\n"
- " gl_Position = vec4(uv * 4.0f -2.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
- programCollection.glslSources.add("vert") << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else if (VK_SHADER_STAGE_VERTEX_BIT != caseDef.shaderStage)
- subgroups::setVertexShaderFrameBuffer(programCollection);
-
- const string source = stageTestSource(caseDef);
-
- const string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- {
- std::ostringstream vertexSrc;
- vertexSrc << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extensionHeader.c_str()
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(location = 0) out vec4 out_color;\n"
- << "layout(location = 0) in highp vec4 in_position;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << fmt << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uint result;\n"
- << source
- << " out_color.r = float(result);\n"
- << " gl_Position = in_position;\n"
- << " gl_PointSize = 1.0f;\n"
- << "}\n";
-
- programCollection.glslSources.add("vert") << glu::VertexSource(vertexSrc.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- {
- std::ostringstream geometry;
-
- geometry << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extensionHeader.c_str()
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(points) in;\n"
- << "layout(points, max_vertices = 1) out;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << fmt << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uint result;\n"
- << source
- << " out_color = float(result);\n"
- << " gl_Position = gl_in[0].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << " EmitVertex();\n"
- << " EndPrimitive();\n"
- << "}\n";
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const bool arbFunctions = caseDef.opType > OPTYPE_LAST_NON_ARB;
+ const string extensions = getExtensions(arbFunctions) + subgroups::getAdditionalExtensionForFormat(caseDef.format);
+ const bool pointSize = *caseDef.geometryPointSizeSupported;
- programCollection.glslSources.add("geometry")
- << glu::GeometrySource(geometry.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- {
- std::ostringstream controlSource;
- controlSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extensionHeader.c_str()
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(vertices = 2) out;\n"
- << "layout(location = 0) out float out_color[];\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << fmt << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uint result;\n"
- << " if (gl_InvocationID == 0)\n"
- <<" {\n"
- << " gl_TessLevelOuter[0] = 1.0f;\n"
- << " gl_TessLevelOuter[1] = 1.0f;\n"
- << " }\n"
- << source
- << " out_color[gl_InvocationID] = float(result);"
- << " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "")
- << "}\n";
+ subgroups::initStdFrameBufferPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, pointSize, extensions, getStageTestSource(caseDef), "");
+}
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(controlSource.str()) << buildOptions;
- subgroups::setTesEvalShaderFrameBuffer(programCollection);
- }
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- {
- std::ostringstream evaluationSource;
- evaluationSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extensionHeader.c_str()
- << "#extension GL_EXT_tessellation_shader : require\n"
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(isolines, equal_spacing, ccw ) in;\n"
- << "layout(location = 0) out float out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << fmt << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << "\n"
- << "void main (void)\n"
- << "{\n"
- << " uint result;\n"
- << " highp uint offset = gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5);\n"
- << source
- << " out_color = float(result);\n"
- << " gl_Position = mix(gl_in[0].gl_Position, gl_in[1].gl_Position, gl_TessCoord.x);\n"
- << (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "")
- << "}\n";
+const string getStageTestSourceFrag (const CaseDefinition& caseDef)
+{
+ const bool formatIsBoolean = fmtIsBoolean(caseDef.format);
+ const string op = getOpTypeName(caseDef.opType);
+ const string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
- subgroups::setTesCtrlShaderFrameBuffer(programCollection);
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(evaluationSource.str()) << buildOptions;
- }
- else if (VK_SHADER_STAGE_FRAGMENT_BIT == caseDef.shaderStage)
- {
- const string op = getOpTypeName(caseDef.opType);
- const string sourceFragment =
+ return
(OPTYPE_ALL == caseDef.opType || OPTYPE_ALL_ARB == caseDef.opType) ?
- " result |= " + op + "(!gl_HelperInvocation) ? 0x0 : 0x1;\n"
- " result |= " + op + "(false) ? 0 : 0x1A;\n"
- " result |= 0x4;\n"
+ " tempRes |= " + op + "(!gl_HelperInvocation) ? 0x0 : 0x1;\n"
+ " tempRes |= " + op + "(false) ? 0 : 0x1A;\n"
+ " tempRes |= 0x4;\n"
: (OPTYPE_ANY == caseDef.opType || OPTYPE_ANY_ARB == caseDef.opType) ?
- " result |= " + op + "(gl_HelperInvocation) ? 0x1 : 0x0;\n"
- " result |= " + op + "(false) ? 0 : 0x1A;\n"
- " result |= 0x4;\n"
+ " tempRes |= " + op + "(gl_HelperInvocation) ? 0x1 : 0x0;\n"
+ " tempRes |= " + op + "(false) ? 0 : 0x1A;\n"
+ " tempRes |= 0x4;\n"
: (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType) ?
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " valueEqual = " + subgroups::getFormatNameForGLSL(caseDef.format) + "(1.25 * float(data[gl_SubgroupInvocationID]) + 5.0);\n" +
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " valueNoEqual = " + subgroups::getFormatNameForGLSL(caseDef.format) + (formatIsBoolean ? "(subgroupElect());\n" : "(gl_SubgroupInvocationID);\n") +
- " result |= " + getOpTypeName(caseDef.opType) + "("
- + subgroups::getFormatNameForGLSL(caseDef.format) + "(1)) ? 0x10 : 0;\n"
- " result |= "
+ " " + fmt + " valueEqual = " + fmt + "(1.25 * float(data[gl_SubgroupInvocationID]) + 5.0);\n" +
+ " " + fmt + " valueNoEqual = " + fmt + (formatIsBoolean ? "(subgroupElect());\n" : "(gl_SubgroupInvocationID);\n") +
+ " tempRes |= " + getOpTypeName(caseDef.opType) + "("
+ + fmt + "(1)) ? 0x10 : 0;\n"
+ " tempRes |= "
+ (formatIsBoolean ? "0x2" : op + "(" + fmt + "(gl_SubgroupInvocationID)) ? 0 : 0x2")
+ ";\n"
- " result |= " + op + "(data[0]) ? 0x4 : 0;\n"
- " result |= " + op + "(valueEqual) ? 0x8 : 0x0;\n"
- " result |= " + op + "(gl_HelperInvocation) ? 0x0 : 0x1;\n"
- " if (subgroupElect()) result |= 0x2 | 0x10;\n"
+ " tempRes |= " + op + "(data[0]) ? 0x4 : 0;\n"
+ " tempRes |= " + op + "(valueEqual) ? 0x8 : 0x0;\n"
+ " tempRes |= " + op + "(gl_HelperInvocation) ? 0x0 : 0x1;\n"
+ " if (subgroupElect()) tempRes |= 0x2 | 0x10;\n"
: "";
-
- std::ostringstream fragmentSource;
- fragmentSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
- << extensionHeader.c_str()
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout(location = 0) out uint out_color;\n"
- << "layout(set = 0, binding = 0) uniform Buffer1\n"
- << "{\n"
- << " " << fmt << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
- << "};\n"
- << ""
- << "void main()\n"
- << "{\n"
- << " uint result = 0u;\n"
- << " if (dFdx(gl_SubgroupInvocationID * gl_FragCoord.x * gl_FragCoord.y) - dFdy(gl_SubgroupInvocationID * gl_FragCoord.x * gl_FragCoord.y) > 0.0f)\n"
- << " {\n"
- << " result |= 0x20;\n" // to be sure that compiler doesn't remove dFdx and dFdy executions
- << " }\n"
- << (arbFunctions ?
- " bool helper = anyInvocationARB(gl_HelperInvocation);\n" :
- " bool helper = subgroupAny(gl_HelperInvocation);\n")
- << " if (helper)\n"
- << " {\n"
- << " result |= 0x40;\n"
- << " }\n"
- << sourceFragment
- << " out_color = result;\n"
- << "}\n";
-
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragmentSource.str())<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
- else
- {
- DE_FATAL("Unsupported shader stage");
- }
}
-void initPrograms(SourceCollections& programCollection, CaseDefinition caseDef)
+void initFrameBufferProgramsFrag (SourceCollections& programCollection, CaseDefinition caseDef)
{
- const bool formatIsBoolean = fmtIsBoolean(caseDef.format);
- const bool arbFunctions = caseDef.opType > OPTYPE_LAST_NON_ARB;
- const string extensionHeader = extHeader(arbFunctions);
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const bool arbFunctions = caseDef.opType > OPTYPE_LAST_NON_ARB;
+ const string extensions = getExtensions(arbFunctions) + subgroups::getAdditionalExtensionForFormat(caseDef.format);
- const string op = getOpTypeName(caseDef.opType);
- const string fmt = subgroups::getFormatNameForGLSL(caseDef.format);
+ DE_ASSERT(VK_SHADER_STAGE_FRAGMENT_BIT == caseDef.shaderStage);
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
{
- std::ostringstream src;
+ const string vertex =
+ "#version 450\n"
+ "void main (void)\n"
+ "{\n"
+ " vec2 uv = vec2(float(gl_VertexIndex & 1), float((gl_VertexIndex >> 1) & 1));\n"
+ " gl_Position = vec4(uv * 4.0f -2.0f, 0.0f, 1.0f);\n"
+ " gl_PointSize = 1.0f;\n"
+ "}\n";
- const string source =
- (OPTYPE_ALL == caseDef.opType || OPTYPE_ALL_ARB == caseDef.opType) ?
- " result = " + op + "(true) ? 0x1 : 0;\n"
- " result |= " + op + "(false) ? 0 : 0x1A;\n"
- " result |= " + op + "(data[gl_SubgroupInvocationID] > 0) ? 0x4 : 0;\n"
- : (OPTYPE_ANY == caseDef.opType || OPTYPE_ANY_ARB == caseDef.opType) ?
- " result = " + op + "(true) ? 0x1 : 0;\n"
- " result |= " + op + "(false) ? 0 : 0x1A;\n"
- " result |= " + op + "(data[gl_SubgroupInvocationID] == data[0]) ? 0x4 : 0;\n"
- : (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType) ?
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " valueEqual = " + subgroups::getFormatNameForGLSL(caseDef.format) + "(1.25 * float(data[gl_SubgroupInvocationID]) + 5.0);\n"
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " valueNoEqual = " + subgroups::getFormatNameForGLSL(caseDef.format) + (formatIsBoolean ? "(subgroupElect());\n" : "(gl_SubgroupInvocationID);\n") +
- " result = " + getOpTypeName(caseDef.opType) + "("
- + subgroups::getFormatNameForGLSL(caseDef.format) + "(1)) ? 0x1 : 0;\n"
- " result |= "
- + (formatIsBoolean ? "0x2" : op + "(" + fmt + "(gl_SubgroupInvocationID)) ? 0 : 0x2")
- + ";\n"
- " result |= " + op + "(data[0]) ? 0x4 : 0x0;\n"
- " result |= " + op + "(valueEqual) ? 0x8 : 0x0;\n"
- " result |= " + op + "(valueNoEqual) ? 0x0 : 0x10;\n"
- " if (subgroupElect()) result |= 0x2 | 0x10;\n"
- : "";
+ programCollection.glslSources.add("vert") << glu::VertexSource(vertex) << buildOptions;
+ }
- src << "#version 450\n"
- << extensionHeader.c_str()
- << subgroups::getAdditionalExtensionForFormat(caseDef.format)
- << "layout (local_size_x_id = 0, local_size_y_id = 1, "
- "local_size_z_id = 2) in;\n"
- << "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- << "{\n"
- << " uint res[];\n"
- << "};\n"
- << "layout(set = 0, binding = 1, std430) buffer Buffer2\n"
+ {
+ ostringstream fragmentSource;
+
+ fragmentSource << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450)<<"\n"
+ << extensions
+ << "layout(location = 0) out uint out_color;\n"
+ << "layout(set = 0, binding = 0) uniform Buffer1\n"
<< "{\n"
- << " " << fmt << " data[];\n"
+ << " " << subgroups::getFormatNameForGLSL(caseDef.format) << " data[" << subgroups::maxSupportedSubgroupSize() << "];\n"
<< "};\n"
- << "\n"
- << "void main (void)\n"
+ << ""
+ << "void main()\n"
<< "{\n"
- << " uint result;\n"
- << " uvec3 globalSize = gl_NumWorkGroups * gl_WorkGroupSize;\n"
- << " highp uint offset = globalSize.x * ((globalSize.y * "
- "gl_GlobalInvocationID.z) + gl_GlobalInvocationID.y) + "
- "gl_GlobalInvocationID.x;\n"
- << source
- << " res[offset] = result;\n"
+ << " uint tempRes = 0u;\n"
+ << " if (dFdx(gl_SubgroupInvocationID * gl_FragCoord.x * gl_FragCoord.y) - dFdy(gl_SubgroupInvocationID * gl_FragCoord.x * gl_FragCoord.y) > 0.0f)\n"
+ << " {\n"
+ << " tempRes |= 0x20;\n" // to be sure that compiler doesn't remove dFdx and dFdy executions
+ << " }\n"
+ << (arbFunctions ?
+ " bool helper = anyInvocationARB(gl_HelperInvocation);\n" :
+ " bool helper = subgroupAny(gl_HelperInvocation);\n")
+ << " if (helper)\n"
+ << " {\n"
+ << " tempRes |= 0x40;\n"
+ << " }\n"
+ << getStageTestSourceFrag(caseDef)
+ << " out_color = tempRes;\n"
<< "}\n";
- programCollection.glslSources.add("comp")
- << glu::ComputeSource(src.str()) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
+ programCollection.glslSources.add("fragment") << glu::FragmentSource(fragmentSource.str())<< buildOptions;
}
- else
- {
- const string source = stageTestSource(caseDef);
-
- {
- const string vertex =
- "#version 450\n"
- + extensionHeader
- + subgroups::getAdditionalExtensionForFormat(caseDef.format) +
- "layout(set = 0, binding = 0, std430) buffer Buffer1\n"
- "{\n"
- " uint res[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + fmt + " data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- " uint result;\n"
- " highp uint offset = gl_VertexIndex;\n"
- + source +
- " res[offset] = result;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " float pixelPosition = pixelSize/2.0f - 1.0f;\n"
- " gl_Position = vec4(float(gl_VertexIndex) * pixelSize + pixelPosition, 0.0f, 0.0f, 1.0f);\n"
- " gl_PointSize = 1.0f;\n"
- "}\n";
- programCollection.glslSources.add("vert")
- << glu::VertexSource(vertex) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const string tesc =
- "#version 450\n"
- + extensionHeader
- + subgroups::getAdditionalExtensionForFormat(caseDef.format) +
- "layout(vertices=1) out;\n"
- "layout(set = 0, binding = 1, std430) buffer Buffer1\n"
- "{\n"
- " uint res[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + fmt + " data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- " uint result;\n"
- " highp uint offset = gl_PrimitiveID;\n"
- + source +
- " res[offset] = result;\n"
- " if (gl_InvocationID == 0)\n"
- " {\n"
- " gl_TessLevelOuter[0] = 1.0f;\n"
- " gl_TessLevelOuter[1] = 1.0f;\n"
- " }\n"
- " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_out[gl_InvocationID].gl_PointSize = gl_in[gl_InvocationID].gl_PointSize;\n" : "") +
- "}\n";
-
- programCollection.glslSources.add("tesc")
- << glu::TessellationControlSource(tesc) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const string tese =
- "#version 450\n"
- + extensionHeader
- + subgroups::getAdditionalExtensionForFormat(caseDef.format) +
- "layout(isolines) in;\n"
- "layout(set = 0, binding = 2, std430) buffer Buffer1\n"
- "{\n"
- " uint res[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + fmt + " data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- " uint result;\n"
- " highp uint offset = gl_PrimitiveID * 2 + uint(gl_TessCoord.x + 0.5);\n"
- + source +
- " res[offset] = result;\n"
- " float pixelSize = 2.0f/1024.0f;\n"
- " gl_Position = gl_in[0].gl_Position + gl_TessCoord.x * pixelSize / 2.0f;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- "}\n";
-
- programCollection.glslSources.add("tese")
- << glu::TessellationEvaluationSource(tese) << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
-
- {
- const string geometry =
- "#version 450\n"
- + extensionHeader
- + subgroups::getAdditionalExtensionForFormat(caseDef.format) +
- "layout(${TOPOLOGY}) in;\n"
- "layout(points, max_vertices = 1) out;\n"
- "layout(set = 0, binding = 3, std430) buffer Buffer1\n"
- "{\n"
- " uint res[];\n"
- "};\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + fmt + " data[];\n"
- "};\n"
- "\n"
- "void main (void)\n"
- "{\n"
- " uint result;\n"
- " highp uint offset = gl_PrimitiveIDIn;\n"
- + source +
- " res[offset] = result;\n"
- " gl_Position = gl_in[0].gl_Position;\n"
- + (*caseDef.geometryPointSizeSupported ? " gl_PointSize = gl_in[0].gl_PointSize;\n" : "") +
- " EmitVertex();\n"
- " EndPrimitive();\n"
- "}\n";
-
- subgroups::addGeometryShadersFromTemplate(geometry, vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u),
- programCollection.glslSources);
- }
+}
- {
- const string sourceFragment =
- (OPTYPE_ALL == caseDef.opType || OPTYPE_ALL_ARB == caseDef.opType) ?
- " result = " + op + "(true) ? 0x1 : 0;\n"
- " result |= " + op + "(false) ? 0 : 0x1A;\n"
- " result |= 0x4;\n"
- : (OPTYPE_ANY == caseDef.opType || OPTYPE_ANY_ARB == caseDef.opType) ?
- " result = " + op + "(true) ? 0x1 : 0;\n"
- " result |= " + op + "(false) ? 0 : 0x1A;\n"
- " result |= 0x4;\n"
- : (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType) ?
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " valueEqual = " + subgroups::getFormatNameForGLSL(caseDef.format) + "(1.25 * float(data[gl_SubgroupInvocationID]) + 5.0);\n" +
- " " + subgroups::getFormatNameForGLSL(caseDef.format) + " valueNoEqual = " + subgroups::getFormatNameForGLSL(caseDef.format) + (formatIsBoolean ? "(subgroupElect());\n" : "(gl_SubgroupInvocationID);\n") +
- " result = " + getOpTypeName(caseDef.opType) + "("
- + subgroups::getFormatNameForGLSL(caseDef.format) + "(1)) ? 0x1 : 0;\n"
- " result |= "
- + (formatIsBoolean ? "0x2" : op + "(" + fmt + "(gl_SubgroupInvocationID)) ? 0 : 0x2")
- + ";\n"
- " result |= " + op + "(data[0]) ? 0x4 : 0;\n"
- " result |= " + op + "(valueEqual) ? 0x8 : 0x0;\n"
- " result |= " + op + "(valueNoEqual) ? 0x0 : 0x10;\n"
- " if (subgroupElect()) result |= 0x2 | 0x10;\n"
- : "";
- const string fragment =
- "#version 450\n"
- + extensionHeader
- + subgroups::getAdditionalExtensionForFormat(caseDef.format) +
- "layout(location = 0) out uint result;\n"
- "layout(set = 0, binding = 4, std430) readonly buffer Buffer2\n"
- "{\n"
- " " + fmt + " data[];\n"
- "};\n"
- "void main (void)\n"
- "{\n"
- + sourceFragment +
- "}\n";
-
- programCollection.glslSources.add("fragment")
- << glu::FragmentSource(fragment)<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_3, 0u);
- }
+void initPrograms (SourceCollections& programCollection, CaseDefinition caseDef)
+{
+ const SpirvVersion spirvVersion = isAllRayTracingStages(caseDef.shaderStage) ? SPIRV_VERSION_1_4 : SPIRV_VERSION_1_3;
+ const ShaderBuildOptions buildOptions (programCollection.usedVulkanVersion, spirvVersion, 0u);
+ const bool arbFunctions = caseDef.opType > OPTYPE_LAST_NON_ARB;
+ const string extensions = getExtensions(arbFunctions) + subgroups::getAdditionalExtensionForFormat(caseDef.format);
+ const bool pointSize = *caseDef.geometryPointSizeSupported;
- subgroups::addNoSubgroupShader(programCollection);
- }
+ subgroups::initStdPrograms(programCollection, buildOptions, caseDef.shaderStage, caseDef.format, pointSize, extensions, getStageTestSource(caseDef), "");
}
void supportedCheck (Context& context, CaseDefinition caseDef)
if (caseDef.requiredSubgroupSize)
{
- if (!context.requireDeviceFunctionality("VK_EXT_subgroup_size_control"))
- TCU_THROW(NotSupportedError, "Device does not support VK_EXT_subgroup_size_control extension");
- VkPhysicalDeviceSubgroupSizeControlFeaturesEXT subgroupSizeControlFeatures;
- subgroupSizeControlFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_FEATURES_EXT;
- subgroupSizeControlFeatures.pNext = DE_NULL;
-
- VkPhysicalDeviceFeatures2 features;
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
- features.pNext = &subgroupSizeControlFeatures;
+ context.requireDeviceFunctionality("VK_EXT_subgroup_size_control");
- context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ const VkPhysicalDeviceSubgroupSizeControlFeaturesEXT& subgroupSizeControlFeatures = context.getSubgroupSizeControlFeaturesEXT();
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
if (subgroupSizeControlFeatures.subgroupSizeControl == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support varying subgroup sizes nor required subgroup size");
if (subgroupSizeControlFeatures.computeFullSubgroups == DE_FALSE)
TCU_THROW(NotSupportedError, "Device does not support full subgroups in compute shaders");
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
if ((subgroupSizeControlProperties.requiredSubgroupSizeStages & caseDef.shaderStage) != caseDef.shaderStage)
TCU_THROW(NotSupportedError, "Required subgroup size is not supported for shader stage");
}
*caseDef.geometryPointSizeSupported = subgroups::isTessellationAndGeometryPointSizeSupported(context);
- vkt::subgroups::supportedCheckShader(context, caseDef.shaderStage);
+ subgroups::supportedCheckShader(context, caseDef.shaderStage);
}
-tcu::TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
+TestStatus noSSBOtest (Context& context, const CaseDefinition caseDef)
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(
- context, caseDef.shaderStage))
- {
- if (subgroups::areSubgroupOperationsRequiredForStage(
- caseDef.shaderStage))
- {
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
- else
- {
- TCU_THROW(NotSupportedError, "Device does not support subgroup operations for this stage");
- }
- }
-
if (caseDef.opType > OPTYPE_LAST_NON_ARB)
{
context.requireDeviceFunctionality("VK_EXT_shader_subgroup_vote");
}
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd140;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType) ? subgroups::SSBOData::InitializeZero : subgroups::SSBOData::InitializeNonZero;
-
- if (VK_SHADER_STAGE_VERTEX_BIT == caseDef.shaderStage)
- return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_GEOMETRY_BIT == caseDef.shaderStage)
- return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
- else if (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
- else if (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT == caseDef.shaderStage)
- return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);
- else if (VK_SHADER_STAGE_FRAGMENT_BIT == caseDef.shaderStage)
- return subgroups::makeFragmentFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkFragmentPipelineStages);
- else
- TCU_THROW(InternalError, "Unhandled shader stage");
+ const subgroups::SSBOData::InputDataInitializeType initializeType = (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType)
+ ? subgroups::SSBOData::InitializeZero
+ : subgroups::SSBOData::InitializeNonZero;
+ const subgroups::SSBOData inputData
+ {
+ initializeType, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd140, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
+
+ switch (caseDef.shaderStage)
+ {
+ case VK_SHADER_STAGE_VERTEX_BIT: return subgroups::makeVertexFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_GEOMETRY_BIT: return subgroups::makeGeometryFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages);
+ case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT: return subgroups::makeTessellationEvaluationFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, caseDef.shaderStage);
+ case VK_SHADER_STAGE_FRAGMENT_BIT: return subgroups::makeFragmentFrameBufferTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkFragmentPipelineStages);
+ default: TCU_THROW(InternalError, "Unhandled shader stage");
+ }
}
-tcu::TestStatus test(Context& context, const CaseDefinition caseDef)
+TestStatus test (Context& context, const CaseDefinition caseDef)
{
- if (VK_SHADER_STAGE_COMPUTE_BIT == caseDef.shaderStage)
+ const subgroups::SSBOData::InputDataInitializeType initializeType = (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType)
+ ? subgroups::SSBOData::InitializeZero
+ : subgroups::SSBOData::InitializeNonZero;
+
+ if (isAllComputeStages(caseDef.shaderStage))
{
- if (!subgroups::areSubgroupOperationsSupportedForStage(context, caseDef.shaderStage))
+ const VkPhysicalDeviceSubgroupSizeControlPropertiesEXT& subgroupSizeControlProperties = context.getSubgroupSizeControlPropertiesEXT();
+ TestLog& log = context.getTestContext().getLog();
+ const subgroups::SSBOData inputData
{
- return tcu::TestStatus::fail(
- "Shader stage " +
- subgroups::getShaderStageName(caseDef.shaderStage) +
- " is required to support subgroup operations!");
- }
-
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType) ? subgroups::SSBOData::InitializeZero : subgroups::SSBOData::InitializeNonZero;
+ initializeType, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ };
if (caseDef.requiredSubgroupSize == DE_FALSE)
- return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData,
- 1, DE_NULL, checkCompute);
-
- tcu::TestLog& log = context.getTestContext().getLog();
- VkPhysicalDeviceSubgroupSizeControlPropertiesEXT subgroupSizeControlProperties;
- subgroupSizeControlProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_SIZE_CONTROL_PROPERTIES_EXT;
- subgroupSizeControlProperties.pNext = DE_NULL;
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupSizeControlProperties;
+ return subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute);
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- log << tcu::TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
- << subgroupSizeControlProperties.maxSubgroupSize << "]" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "Testing required subgroup size range [" << subgroupSizeControlProperties.minSubgroupSize << ", "
+ << subgroupSizeControlProperties.maxSubgroupSize << "]" << TestLog::EndMessage;
// According to the spec, requiredSubgroupSize must be a power-of-two integer.
for (deUint32 size = subgroupSizeControlProperties.minSubgroupSize; size <= subgroupSizeControlProperties.maxSubgroupSize; size *= 2)
{
- tcu::TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute,
- size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+ TestStatus result = subgroups::makeComputeTest(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkCompute, size, VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT);
+
if (result.getCode() != QP_TEST_RESULT_PASS)
{
- log << tcu::TestLog::Message << "subgroupSize " << size << " failed" << tcu::TestLog::EndMessage;
+ log << TestLog::Message << "subgroupSize " << size << " failed" << TestLog::EndMessage;
return result;
}
}
- return tcu::TestStatus::pass("OK");
+
+ return TestStatus::pass("OK");
}
- else
+ else if (isAllGraphicsStages(caseDef.shaderStage))
{
- VkPhysicalDeviceSubgroupProperties subgroupProperties;
- subgroupProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES;
- subgroupProperties.pNext = DE_NULL;
-
- VkPhysicalDeviceProperties2 properties;
- properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- properties.pNext = &subgroupProperties;
-
- context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
-
- VkShaderStageFlagBits stages = (VkShaderStageFlagBits)(caseDef.shaderStage & subgroupProperties.supportedStages);
-
- if (VK_SHADER_STAGE_FRAGMENT_BIT != stages && !subgroups::isVertexSSBOSupportedForDevice(context))
+ const VkShaderStageFlags stages = subgroups::getPossibleGraphicsSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
{
- if ( (stages & VK_SHADER_STAGE_FRAGMENT_BIT) == 0)
- TCU_THROW(NotSupportedError, "Device does not support vertex stage SSBO writes");
- else
- stages = VK_SHADER_STAGE_FRAGMENT_BIT;
- }
-
- if ((VkShaderStageFlagBits)0u == stages)
- TCU_THROW(NotSupportedError, "Subgroup operations are not supported for any graphic shader");
-
- subgroups::SSBOData inputData;
- inputData.format = caseDef.format;
- inputData.layout = subgroups::SSBOData::LayoutStd430;
- inputData.numElements = subgroups::maxSupportedSubgroupSize();
- inputData.initializeType = (OPTYPE_ALLEQUAL == caseDef.opType || OPTYPE_ALLEQUAL_ARB == caseDef.opType) ? subgroups::SSBOData::InitializeZero : subgroups::SSBOData::InitializeNonZero;
- inputData.binding = 4u;
- inputData.stages = stages;
+ initializeType, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 4u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
return subgroups::allStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
}
+ else if (isAllRayTracingStages(caseDef.shaderStage))
+ {
+ const VkShaderStageFlags stages = subgroups::getPossibleRayTracingSubgroupStages(context, caseDef.shaderStage);
+ const subgroups::SSBOData inputData =
+ {
+ initializeType, // InputDataInitializeType initializeType;
+ subgroups::SSBOData::LayoutStd430, // InputDataLayoutType layout;
+ caseDef.format, // vk::VkFormat format;
+ subgroups::maxSupportedSubgroupSize(), // vk::VkDeviceSize numElements;
+ false, // bool isImage;
+ 6u, // deUint32 binding;
+ stages, // vk::VkShaderStageFlags stages;
+ };
+
+ return subgroups::allRayTracingStages(context, VK_FORMAT_R32_UINT, &inputData, 1, DE_NULL, checkVertexPipelineStages, stages);
+ }
+ else
+ TCU_THROW(InternalError, "Unknown stage or invalid stage set");
}
}
{
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsVoteTests(tcu::TestContext& testCtx)
+TestCaseGroup* createSubgroupsVoteTests (TestContext& testCtx)
{
- de::MovePtr<tcu::TestCaseGroup> graphicGroup(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup arithmetic category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroup(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup arithmetic category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroup(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup arithmetic category tests: framebuffer"));
-
- de::MovePtr<tcu::TestCaseGroup> fragHelperGroup(new tcu::TestCaseGroup(
- testCtx, "frag_helper", "Subgroup arithmetic category tests: fragment helper invocation"));
-
- de::MovePtr<tcu::TestCaseGroup> graphicGroupARB(new tcu::TestCaseGroup(
- testCtx, "graphics", "Subgroup arithmetic category tests: graphics"));
- de::MovePtr<tcu::TestCaseGroup> computeGroupARB(new tcu::TestCaseGroup(
- testCtx, "compute", "Subgroup arithmetic category tests: compute"));
- de::MovePtr<tcu::TestCaseGroup> framebufferGroupARB(new tcu::TestCaseGroup(
- testCtx, "framebuffer", "Subgroup arithmetic category tests: framebuffer"));
-
- de::MovePtr<tcu::TestCaseGroup> fragHelperGroupARB(new tcu::TestCaseGroup(
- testCtx, "frag_helper", "Subgroup arithmetic category tests: fragment helper invocation"));
-
- const VkShaderStageFlags stages[] =
+ de::MovePtr<TestCaseGroup> group (new TestCaseGroup(testCtx, "vote", "Subgroup vote category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroup (new TestCaseGroup(testCtx, "graphics", "Subgroup vote category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroup (new TestCaseGroup(testCtx, "compute", "Subgroup vote category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroup (new TestCaseGroup(testCtx, "framebuffer", "Subgroup vote category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> fragHelperGroup (new TestCaseGroup(testCtx, "frag_helper", "Subgroup vote category tests: fragment helper invocation"));
+ de::MovePtr<TestCaseGroup> raytracingGroup (new TestCaseGroup(testCtx, "ray_tracing", "Subgroup vote category tests: raytracing"));
+
+ de::MovePtr<TestCaseGroup> groupARB (new TestCaseGroup(testCtx, "ext_shader_subgroup_vote", "VK_EXT_shader_subgroup_vote category tests"));
+ de::MovePtr<TestCaseGroup> graphicGroupARB (new TestCaseGroup(testCtx, "graphics", "Subgroup vote category tests: graphics"));
+ de::MovePtr<TestCaseGroup> computeGroupARB (new TestCaseGroup(testCtx, "compute", "Subgroup vote category tests: compute"));
+ de::MovePtr<TestCaseGroup> framebufferGroupARB (new TestCaseGroup(testCtx, "framebuffer", "Subgroup vote category tests: framebuffer"));
+ de::MovePtr<TestCaseGroup> fragHelperGroupARB (new TestCaseGroup(testCtx, "frag_helper", "Subgroup vote category tests: fragment helper invocation"));
+ const deBool boolValues[] =
{
- VK_SHADER_STAGE_VERTEX_BIT,
- VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
- VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
- VK_SHADER_STAGE_GEOMETRY_BIT,
+ DE_FALSE,
+ DE_TRUE
};
- const std::vector<VkFormat> formats = subgroups::getAllFormats();
-
- for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- const VkFormat format = formats[formatIndex];
- const deBool formatIsNotVector = VK_FORMAT_R8_USCALED == format || VK_FORMAT_R32_UINT == format ||
- VK_FORMAT_R32_SINT == format || VK_FORMAT_R32_SFLOAT == format || VK_FORMAT_R64_SFLOAT == format;
-
- for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
+ const VkShaderStageFlags stages[] =
+ {
+ VK_SHADER_STAGE_VERTEX_BIT,
+ VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
+ VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,
+ VK_SHADER_STAGE_GEOMETRY_BIT,
+ };
+ const vector<VkFormat> formats = subgroups::getAllFormats();
+
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
{
- // Skip OPTYPE_LAST_NON_ARB because it is not a real op type.
- if (opTypeIndex == OPTYPE_LAST_NON_ARB)
- continue;
-
- // Skip the non-nonvector tests because VK_EXT_shader_subgroup_vote functions only supports boolean scalar arguments.
- if (opTypeIndex > OPTYPE_LAST_NON_ARB && !formatIsNotVector)
- continue;
-
- // Skip non-boolean formats when testing allInvocationsEqualARB(bool value), because it requires a boolean
- // argument that should have the same value for all invocations. For the rest of formats, it won't be a boolean argument,
- // so it may give wrong results when converting to bool.
- if (opTypeIndex == OPTYPE_ALLEQUAL_ARB && format != VK_FORMAT_R8_USCALED)
- continue;
-
- // Skip the typed tests for all but subgroupAllEqual() and allInvocationsEqualARB()
- if ((VK_FORMAT_R32_UINT != format) && (OPTYPE_ALLEQUAL != opTypeIndex) && (OPTYPE_ALLEQUAL_ARB != opTypeIndex))
+ const VkFormat format = formats[formatIndex];
+ const bool needs8BitUBOStorage = isFormat8bitTy(format);
+ const bool needs16BitUBOStorage = isFormat16BitTy(format);
+ const deBool formatIsNotVector = format == VK_FORMAT_R8_USCALED
+ || format == VK_FORMAT_R32_UINT
+ || format == VK_FORMAT_R32_SINT
+ || format == VK_FORMAT_R32_SFLOAT
+ || format == VK_FORMAT_R64_SFLOAT;
+
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST; ++opTypeIndex)
{
- continue;
- }
+ const OpType opType = static_cast<OpType>(opTypeIndex);
- const std::string op = de::toLower(getOpTypeName(opTypeIndex));
+ // Skip OPTYPE_LAST_NON_ARB because it is not a real op type.
+ if (opType == OPTYPE_LAST_NON_ARB)
+ continue;
- {
- CaseDefinition caseDef = { opTypeIndex, VK_SHADER_STAGE_COMPUTE_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE, deBool(false),deBool(false) };
- if (opTypeIndex < OPTYPE_LAST_NON_ARB)
- {
- addFunctionCaseWithPrograms(computeGroup.get(),
- op + "_" + subgroups::getFormatNameForGLSL(format),
- "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroup.get(),
- op + "_" + subgroups::getFormatNameForGLSL(format) + "_requiredsubgroupsize",
- "", supportedCheck, initPrograms, test, caseDef);
- }
- else
+ // Skip the vector-format tests because the VK_EXT_shader_subgroup_vote functions only support boolean scalar arguments.
+ if (opType > OPTYPE_LAST_NON_ARB && !formatIsNotVector)
+ continue;
+
+ // Skip non-boolean formats when testing allInvocationsEqualARB(bool value), because it requires a boolean
+ // argument that should have the same value for all invocations. For the rest of formats, it won't be a boolean argument,
+ // so it may give wrong results when converting to bool.
+ if (opType == OPTYPE_ALLEQUAL_ARB && format != VK_FORMAT_R8_USCALED)
+ continue;
+
+ // Skip the typed tests for all but subgroupAllEqual() and allInvocationsEqualARB()
+ if ((VK_FORMAT_R32_UINT != format) && (OPTYPE_ALLEQUAL != opType) && (OPTYPE_ALLEQUAL_ARB != opType))
{
- addFunctionCaseWithPrograms(computeGroupARB.get(),
- op + "_" + subgroups::getFormatNameForGLSL(format),
- "", supportedCheck, initPrograms, test, caseDef);
- caseDef.requiredSubgroupSize = DE_TRUE;
- addFunctionCaseWithPrograms(computeGroupARB.get(),
- op + "_" + subgroups::getFormatNameForGLSL(format) + "_requiredsubgroupsize",
- "", supportedCheck, initPrograms, test, caseDef);
+ continue;
}
- }
- {
- const CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_ALL_GRAPHICS, format, de::SharedPtr<bool>(new bool), DE_FALSE, deBool(false),deBool(false) };
- if (opTypeIndex < OPTYPE_LAST_NON_ARB)
+ const string op = de::toLower(getOpTypeName(opType));
+ const string name = op + "_" + subgroups::getFormatNameForGLSL(format);
+ TestCaseGroup* computeGroupPtr = (opType < OPTYPE_LAST_NON_ARB) ? computeGroup.get() : computeGroupARB.get();
+ TestCaseGroup* graphicGroupPtr = (opType < OPTYPE_LAST_NON_ARB) ? graphicGroup.get() : graphicGroupARB.get();
+ TestCaseGroup* framebufferGroupPtr = (opType < OPTYPE_LAST_NON_ARB) ? framebufferGroup.get() : framebufferGroupARB.get();
+ TestCaseGroup* fragHelperGroupPtr = (opType < OPTYPE_LAST_NON_ARB) ? fragHelperGroup.get() : fragHelperGroupARB.get();
+
+ for (size_t groupSizeNdx = 0; groupSizeNdx < DE_LENGTH_OF_ARRAY(boolValues); ++groupSizeNdx)
{
- addFunctionCaseWithPrograms(graphicGroup.get(),
- op + "_" + subgroups::getFormatNameForGLSL(format),
- "", supportedCheck, initPrograms, test, caseDef);
+ const deBool requiredSubgroupSize = boolValues[groupSizeNdx];
+ const string testName = name + (requiredSubgroupSize ? "_requiredsubgroupsize" : "");
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ requiredSubgroupSize, // deBool requiredSubgroupSize;
+ deBool(false), // deBool requires8BitUniformBuffer;
+ deBool(false) // deBool requires16BitUniformBuffer;
+ };
+
+ addFunctionCaseWithPrograms(computeGroupPtr, testName, "", supportedCheck, initPrograms, test, caseDef);
}
- else
+
{
- addFunctionCaseWithPrograms(graphicGroupARB.get(),
- op + "_" + subgroups::getFormatNameForGLSL(format),
- "", supportedCheck, initPrograms, test, caseDef);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_ALL_GRAPHICS, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE, // deBool requiredSubgroupSize;
+ deBool(false), // deBool requires8BitUniformBuffer;
+ deBool(false) // deBool requires16BitUniformBuffer;
+ };
+
+ addFunctionCaseWithPrograms(graphicGroupPtr, name, "", supportedCheck, initPrograms, test, caseDef);
}
- }
- for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
- {
- const CaseDefinition caseDef = {opTypeIndex, stages[stageIndex], format, de::SharedPtr<bool>(new bool), DE_FALSE, deBool(false),deBool(false) };
- if (opTypeIndex < OPTYPE_LAST_NON_ARB)
+ for (int stageIndex = 0; stageIndex < DE_LENGTH_OF_ARRAY(stages); ++stageIndex)
{
- addFunctionCaseWithPrograms(framebufferGroup.get(),
- op + "_" +
- subgroups::getFormatNameForGLSL(format)
- + "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ stages[stageIndex], // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE, // deBool requiredSubgroupSize;
+ deBool(false), // deBool requires8BitUniformBuffer;
+ deBool(false) // deBool requires16BitUniformBuffer;
+ };
+ const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(framebufferGroupPtr, testName, "", supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
}
- else
+
{
- addFunctionCaseWithPrograms(framebufferGroupARB.get(),
- op + "_" +
- subgroups::getFormatNameForGLSL(format)
- + "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE, // deBool requiredSubgroupSize;
+ deBool(needs8BitUBOStorage), // deBool requires8BitUniformBuffer;
+ deBool(needs16BitUBOStorage) // deBool requires16BitUniformBuffer;
+ };
+ const string testName = name + "_" + getShaderStageName(caseDef.shaderStage);
+
+ addFunctionCaseWithPrograms(fragHelperGroupPtr, testName, "", supportedCheck, initFrameBufferProgramsFrag, noSSBOtest, caseDef);
}
}
+ }
+ }
- bool needs8BitUBOStorage = isFormat8bitTy(format);
- bool needs16BitUBOStorage = isFormat16BitTy(format);
- const CaseDefinition caseDef = {opTypeIndex, VK_SHADER_STAGE_FRAGMENT_BIT, format, de::SharedPtr<bool>(new bool), DE_FALSE, deBool(needs8BitUBOStorage),deBool(needs16BitUBOStorage) };
- if (opTypeIndex < OPTYPE_LAST_NON_ARB)
- {
- addFunctionCaseWithPrograms(fragHelperGroup.get(),
- op + "_" +
- subgroups::getFormatNameForGLSL(format)
- + "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
- }
- else
+ {
+ const vector<VkFormat> formats = subgroups::getAllRayTracingFormats();
+
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ++formatIndex)
+ {
+ const VkFormat format = formats[formatIndex];
+
+ for (int opTypeIndex = 0; opTypeIndex < OPTYPE_LAST_NON_ARB; ++opTypeIndex)
{
- addFunctionCaseWithPrograms(fragHelperGroupARB.get(),
- op + "_" +
- subgroups::getFormatNameForGLSL(format)
- + "_" + getShaderStageName(caseDef.shaderStage), "",
- supportedCheck, initFrameBufferPrograms, noSSBOtest, caseDef);
+ const OpType opType = static_cast<OpType>(opTypeIndex);
+
+ // Skip the typed tests for all but subgroupAllEqual()
+ if ((VK_FORMAT_R32_UINT != format) && (OPTYPE_ALLEQUAL != opType))
+ {
+ continue;
+ }
+
+ const string op = de::toLower(getOpTypeName(opType));
+ const string name = op + "_" + subgroups::getFormatNameForGLSL(format);
+ const CaseDefinition caseDef =
+ {
+ opType, // OpType opType;
+ SHADER_STAGE_ALL_RAY_TRACING, // VkShaderStageFlags shaderStage;
+ format, // VkFormat format;
+ de::SharedPtr<bool>(new bool), // de::SharedPtr<bool> geometryPointSizeSupported;
+ DE_FALSE, // deBool requiredSubgroupSize;
+ DE_FALSE, // deBool requires8BitUniformBuffer;
+ DE_FALSE // deBool requires16BitUniformBuffer;
+ };
+
+ addFunctionCaseWithPrograms(raytracingGroup.get(), name, "", supportedCheck, initPrograms, test, caseDef);
}
}
}
- de::MovePtr<tcu::TestCaseGroup> groupARB(new tcu::TestCaseGroup(
- testCtx, "ext_shader_subgroup_vote", "VK_EXT_shader_subgroup_vote category tests"));
-
groupARB->addChild(graphicGroupARB.release());
groupARB->addChild(computeGroupARB.release());
groupARB->addChild(framebufferGroupARB.release());
groupARB->addChild(fragHelperGroupARB.release());
- de::MovePtr<tcu::TestCaseGroup> group(new tcu::TestCaseGroup(
- testCtx, "vote", "Subgroup vote category tests"));
-
group->addChild(graphicGroup.release());
group->addChild(computeGroup.release());
group->addChild(framebufferGroup.release());
group->addChild(fragHelperGroup.release());
+ group->addChild(raytracingGroup.release());
group->addChild(groupARB.release());
namespace subgroups
{
-tcu::TestCaseGroup* createSubgroupsVoteTests(tcu::TestContext& testCtx);
+tcu::TestCaseGroup* createSubgroupsVoteTests (tcu::TestContext& testCtx);
} // subgroups
} // vkt
bool isDevicePropertyInitialized (VkStructureType sType) const { return m_deviceProperties.isDevicePropertyInitialized(sType); }
const VkPhysicalDeviceProperties& getDeviceProperties (void) const { return m_deviceProperties.getCoreProperties2().properties; }
const VkPhysicalDeviceProperties2& getDeviceProperties2 (void) const { return m_deviceProperties.getCoreProperties2(); }
- const VkPhysicalDeviceVulkan11Properties& getVulkan11Properties (void) const { return m_deviceProperties.getVulkan11Properties(); }
- const VkPhysicalDeviceVulkan12Properties& getVulkan12Properties (void) const { return m_deviceProperties.getVulkan12Properties(); }
+ const VkPhysicalDeviceVulkan11Properties& getDeviceVulkan11Properties (void) const { return m_deviceProperties.getVulkan11Properties(); }
+ const VkPhysicalDeviceVulkan12Properties& getDeviceVulkan12Properties (void) const { return m_deviceProperties.getVulkan12Properties(); }
#include "vkDevicePropertiesForDefaultDeviceDefs.inl"
{
}
-deUint32 Context::getMaximumFrameworkVulkanVersion (void) const { return m_device->getMaximumFrameworkVulkanVersion(); }
-deUint32 Context::getAvailableInstanceVersion (void) const { return m_device->getAvailableInstanceVersion(); }
-const vector<string>& Context::getInstanceExtensions (void) const { return m_device->getInstanceExtensions(); }
-vk::VkInstance Context::getInstance (void) const { return m_device->getInstance(); }
-const vk::InstanceInterface& Context::getInstanceInterface (void) const { return m_device->getInstanceInterface(); }
-vk::VkPhysicalDevice Context::getPhysicalDevice (void) const { return m_device->getPhysicalDevice(); }
-deUint32 Context::getDeviceVersion (void) const { return m_device->getDeviceVersion(); }
-const vk::VkPhysicalDeviceFeatures& Context::getDeviceFeatures (void) const { return m_device->getDeviceFeatures(); }
-const vk::VkPhysicalDeviceFeatures2& Context::getDeviceFeatures2 (void) const { return m_device->getDeviceFeatures2(); }
+deUint32 Context::getMaximumFrameworkVulkanVersion (void) const { return m_device->getMaximumFrameworkVulkanVersion(); }
+deUint32 Context::getAvailableInstanceVersion (void) const { return m_device->getAvailableInstanceVersion(); }
+const vector<string>& Context::getInstanceExtensions (void) const { return m_device->getInstanceExtensions(); }
+vk::VkInstance Context::getInstance (void) const { return m_device->getInstance(); }
+const vk::InstanceInterface& Context::getInstanceInterface (void) const { return m_device->getInstanceInterface(); }
+vk::VkPhysicalDevice Context::getPhysicalDevice (void) const { return m_device->getPhysicalDevice(); }
+deUint32 Context::getDeviceVersion (void) const { return m_device->getDeviceVersion(); }
+const vk::VkPhysicalDeviceFeatures& Context::getDeviceFeatures (void) const { return m_device->getDeviceFeatures(); }
+const vk::VkPhysicalDeviceFeatures2& Context::getDeviceFeatures2 (void) const { return m_device->getDeviceFeatures2(); }
+const vk::VkPhysicalDeviceVulkan11Features& Context::getDeviceVulkan11Features (void) const { return m_device->getVulkan11Features(); }
+const vk::VkPhysicalDeviceVulkan12Features& Context::getDeviceVulkan12Features (void) const { return m_device->getVulkan12Features(); }
bool Context::isDeviceFunctionalitySupported (const std::string& extension) const
{
#include "vkDeviceFeaturesForContextDefs.inl"
-const vk::VkPhysicalDeviceProperties& Context::getDeviceProperties (void) const { return m_device->getDeviceProperties(); }
-const vk::VkPhysicalDeviceProperties2& Context::getDeviceProperties2 (void) const { return m_device->getDeviceProperties2(); }
+const vk::VkPhysicalDeviceProperties& Context::getDeviceProperties (void) const { return m_device->getDeviceProperties(); }
+const vk::VkPhysicalDeviceProperties2& Context::getDeviceProperties2 (void) const { return m_device->getDeviceProperties2(); }
+const vk::VkPhysicalDeviceVulkan11Properties& Context::getDeviceVulkan11Properties (void) const { return m_device->getDeviceVulkan11Properties(); }
+const vk::VkPhysicalDeviceVulkan12Properties& Context::getDeviceVulkan12Properties (void) const { return m_device->getDeviceVulkan12Properties(); }
#include "vkDevicePropertiesForContextDefs.inl"
class Context
{
public:
- Context (tcu::TestContext& testCtx,
- const vk::PlatformInterface& platformInterface,
- vk::BinaryCollection& progCollection);
- ~Context (void);
+ Context (tcu::TestContext& testCtx,
+ const vk::PlatformInterface& platformInterface,
+ vk::BinaryCollection& progCollection);
+ ~Context (void);
- tcu::TestContext& getTestContext (void) const { return m_testCtx; }
- const vk::PlatformInterface& getPlatformInterface (void) const { return m_platformInterface; }
- vk::BinaryCollection& getBinaryCollection (void) const { return m_progCollection; }
+ tcu::TestContext& getTestContext (void) const { return m_testCtx; }
+ const vk::PlatformInterface& getPlatformInterface (void) const { return m_platformInterface; }
+ vk::BinaryCollection& getBinaryCollection (void) const { return m_progCollection; }
// Default instance & device, selected with --deqp-vk-device-id=N
- deUint32 getMaximumFrameworkVulkanVersion (void) const;
- deUint32 getAvailableInstanceVersion (void) const;
- const std::vector<std::string>& getInstanceExtensions (void) const;
- vk::VkInstance getInstance (void) const;
- const vk::InstanceInterface& getInstanceInterface (void) const;
- vk::VkPhysicalDevice getPhysicalDevice (void) const;
- deUint32 getDeviceVersion (void) const;
- bool isDeviceFeatureInitialized (vk::VkStructureType sType) const;
- const vk::VkPhysicalDeviceFeatures& getDeviceFeatures (void) const;
- const vk::VkPhysicalDeviceFeatures2& getDeviceFeatures2 (void) const;
-
- bool isInstanceFunctionalitySupported (const std::string& extension) const;
- bool isDeviceFunctionalitySupported (const std::string& extension) const;
+ deUint32 getMaximumFrameworkVulkanVersion (void) const;
+ deUint32 getAvailableInstanceVersion (void) const;
+ const std::vector<std::string>& getInstanceExtensions (void) const;
+ vk::VkInstance getInstance (void) const;
+ const vk::InstanceInterface& getInstanceInterface (void) const;
+ vk::VkPhysicalDevice getPhysicalDevice (void) const;
+ deUint32 getDeviceVersion (void) const;
+ bool isDeviceFeatureInitialized (vk::VkStructureType sType) const;
+ const vk::VkPhysicalDeviceFeatures& getDeviceFeatures (void) const;
+ const vk::VkPhysicalDeviceFeatures2& getDeviceFeatures2 (void) const;
+ const vk::VkPhysicalDeviceVulkan11Features& getDeviceVulkan11Features (void) const;
+ const vk::VkPhysicalDeviceVulkan12Features& getDeviceVulkan12Features (void) const;
+
+ bool isInstanceFunctionalitySupported (const std::string& extension) const;
+ bool isDeviceFunctionalitySupported (const std::string& extension) const;
#include "vkDeviceFeaturesForContextDecl.inl"
- bool isDevicePropertyInitialized (vk::VkStructureType sType) const;
- const vk::VkPhysicalDeviceProperties& getDeviceProperties (void) const;
- const vk::VkPhysicalDeviceProperties2& getDeviceProperties2 (void) const;
+ bool isDevicePropertyInitialized (vk::VkStructureType sType) const;
+ const vk::VkPhysicalDeviceProperties& getDeviceProperties (void) const;
+ const vk::VkPhysicalDeviceProperties2& getDeviceProperties2 (void) const;
+ const vk::VkPhysicalDeviceVulkan11Properties& getDeviceVulkan11Properties (void) const;
+ const vk::VkPhysicalDeviceVulkan12Properties& getDeviceVulkan12Properties (void) const;
#include "vkDevicePropertiesForContextDecl.inl"
- const std::vector<std::string>& getDeviceExtensions (void) const;
- vk::VkDevice getDevice (void) const;
- const vk::DeviceInterface& getDeviceInterface (void) const;
- deUint32 getUniversalQueueFamilyIndex (void) const;
- vk::VkQueue getUniversalQueue (void) const;
- deUint32 getUsedApiVersion (void) const;
- deUint32 getSparseQueueFamilyIndex (void) const;
- vk::VkQueue getSparseQueue (void) const;
- vk::Allocator& getDefaultAllocator (void) const;
- bool contextSupports (const deUint32 majorNum, const deUint32 minorNum, const deUint32 patchNum) const;
- bool contextSupports (const vk::ApiVersion version) const;
- bool contextSupports (const deUint32 requiredApiVersionBits) const;
- bool requireDeviceFunctionality (const std::string& required) const;
- bool requireInstanceFunctionality (const std::string& required) const;
- bool requireDeviceCoreFeature (const DeviceCoreFeature requiredDeviceCoreFeature);
+ const std::vector<std::string>& getDeviceExtensions (void) const;
+ vk::VkDevice getDevice (void) const;
+ const vk::DeviceInterface& getDeviceInterface (void) const;
+ deUint32 getUniversalQueueFamilyIndex (void) const;
+ vk::VkQueue getUniversalQueue (void) const;
+ deUint32 getUsedApiVersion (void) const;
+ deUint32 getSparseQueueFamilyIndex (void) const;
+ vk::VkQueue getSparseQueue (void) const;
+ vk::Allocator& getDefaultAllocator (void) const;
+ bool contextSupports (const deUint32 majorNum, const deUint32 minorNum, const deUint32 patchNum) const;
+ bool contextSupports (const vk::ApiVersion version) const;
+ bool contextSupports (const deUint32 requiredApiVersionBits) const;
+ bool requireDeviceFunctionality (const std::string& required) const;
+ bool requireInstanceFunctionality (const std::string& required) const;
+ bool requireDeviceCoreFeature (const DeviceCoreFeature requiredDeviceCoreFeature);
- void* getInstanceProcAddr ();
+ void* getInstanceProcAddr ();
- bool isBufferDeviceAddressSupported (void) const;
+ bool isBufferDeviceAddressSupported (void) const;
- bool resultSetOnValidation () const { return m_resultSetOnValidation; }
- void resultSetOnValidation (bool value) { m_resultSetOnValidation = value; }
+ bool resultSetOnValidation () const { return m_resultSetOnValidation; }
+ void resultSetOnValidation (bool value) { m_resultSetOnValidation = value; }
- bool hasDebugReportRecorder () const;
- vk::DebugReportRecorder& getDebugReportRecorder () const;
+ bool hasDebugReportRecorder () const;
+ vk::DebugReportRecorder& getDebugReportRecorder () const;
protected:
- tcu::TestContext& m_testCtx;
- const vk::PlatformInterface& m_platformInterface;
- vk::BinaryCollection& m_progCollection;
+ tcu::TestContext& m_testCtx;
+ const vk::PlatformInterface& m_platformInterface;
+ vk::BinaryCollection& m_progCollection;
- const de::UniquePtr<DefaultDevice> m_device;
- const de::UniquePtr<vk::Allocator> m_allocator;
+ const de::UniquePtr<DefaultDevice> m_device;
+ const de::UniquePtr<vk::Allocator> m_allocator;
- bool m_resultSetOnValidation;
+ bool m_resultSetOnValidation;
private:
Context (const Context&); // Not allowed
dEQP-VK.subgroups.builtin_var.compute.numsubgroups_requiredsubgroupsize
dEQP-VK.subgroups.builtin_var.compute.subgroupid
dEQP-VK.subgroups.builtin_var.compute.subgroupid_requiredsubgroupsize
+dEQP-VK.subgroups.builtin_var.ray_tracing.subgroupsize
+dEQP-VK.subgroups.builtin_var.ray_tracing.subgroupinvocationid
dEQP-VK.subgroups.builtin_var.framebuffer.subgroupsize_vertex
dEQP-VK.subgroups.builtin_var.framebuffer.subgroupsize_tess_eval
dEQP-VK.subgroups.builtin_var.framebuffer.subgroupsize_tess_control
dEQP-VK.subgroups.builtin_mask_var.framebuffer.subgroupltmask_tess_eval
dEQP-VK.subgroups.builtin_mask_var.framebuffer.subgroupltmask_tess_control
dEQP-VK.subgroups.builtin_mask_var.framebuffer.subgroupltmask_geometry
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupeqmask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupgemask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupgtmask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgrouplemask
+dEQP-VK.subgroups.builtin_mask_var.ray_tracing.subgroupltmask
dEQP-VK.subgroups.basic.graphics.subgroupelect
dEQP-VK.subgroups.basic.graphics.subgroupbarrier
dEQP-VK.subgroups.basic.graphics.subgroupmemorybarrier
dEQP-VK.subgroups.basic.framebuffer.subgroupmemorybarrierimage_tess_eval
dEQP-VK.subgroups.basic.framebuffer.subgroupmemorybarrierimage_tess_control
dEQP-VK.subgroups.basic.framebuffer.subgroupmemorybarrierimage_geometry
+dEQP-VK.subgroups.basic.ray_tracing.subgroupelect
+dEQP-VK.subgroups.basic.ray_tracing.subgroupbarrier
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrier
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrierbuffer
+dEQP-VK.subgroups.basic.ray_tracing.subgroupmemorybarrierimage
dEQP-VK.subgroups.vote.graphics.subgroupallequal_int8_t
dEQP-VK.subgroups.vote.graphics.subgroupallequal_i8vec2
dEQP-VK.subgroups.vote.graphics.subgroupallequal_i8vec3
dEQP-VK.subgroups.vote.frag_helper.subgroupallequal_bvec2_fragment
dEQP-VK.subgroups.vote.frag_helper.subgroupallequal_bvec3_fragment
dEQP-VK.subgroups.vote.frag_helper.subgroupallequal_bvec4_fragment
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i8vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint8_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u8vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i16vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint16_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u16vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_ivec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupall_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupany_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uvec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_i64vec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_uint64_t
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_u64vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_f16vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_float
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_vec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_double
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_dvec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_dvec4
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bool
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec2
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec3
+dEQP-VK.subgroups.vote.ray_tracing.subgroupallequal_bvec4
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.graphics.allinvocationsarb_uint
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.graphics.anyinvocationarb_uint
dEQP-VK.subgroups.vote.ext_shader_subgroup_vote.graphics.allinvocationsequalarb_bool
dEQP-VK.subgroups.ballot.framebuffer.tess_control
dEQP-VK.subgroups.ballot.framebuffer.geometry
dEQP-VK.subgroups.ballot.framebuffer.vertex
+dEQP-VK.subgroups.ballot.ray_tracing.test
dEQP-VK.subgroups.ballot.ext_shader_subgroup_ballot.graphics.graphic
dEQP-VK.subgroups.ballot.ext_shader_subgroup_ballot.compute.compute
dEQP-VK.subgroups.ballot.ext_shader_subgroup_ballot.compute.compute_requiredsubgroupsize
dEQP-VK.subgroups.ballot_broadcast.framebuffer.subgroupbroadcastfirst_bvec4tess_eval
dEQP-VK.subgroups.ballot_broadcast.framebuffer.subgroupbroadcastfirst_bvec4tess_control
dEQP-VK.subgroups.ballot_broadcast.framebuffer.subgroupbroadcastfirst_bvec4geometry
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i8vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint8_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u8vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i16vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint16_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_ivec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_i64vec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_uint64_t
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_u64vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_f16vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_float
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_vec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_double
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_dvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_dvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bool
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec2
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec3
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_bvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcast_nonconst_bvec4
+dEQP-VK.subgroups.ballot_broadcast.ray_tracing.subgroupbroadcastfirst_bvec4
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.graphics.subgroupbroadcast_int
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.graphics.subgroupbroadcast_nonconst_int
dEQP-VK.subgroups.ballot_broadcast.ext_shader_subgroup_ballot.graphics.subgroupbroadcastfirst_int
dEQP-VK.subgroups.ballot_other.graphics.subgroupballotfindlsb
dEQP-VK.subgroups.ballot_other.graphics.subgroupballotfindmsb
dEQP-VK.subgroups.ballot_other.compute.subgroupinverseballot
-dEQP-VK.subgroups.ballot_other.compute.subgroupinverseballot_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupinverseballot_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitextract
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitextract_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitextract_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitcount
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitcount_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotbitcount_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotinclusivebitcount
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotinclusivebitcount_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotinclusivebitcount_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotexclusivebitcount
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotexclusivebitcount_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotexclusivebitcount_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindlsb
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindlsb_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindlsb_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindmsb
-dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindmsb_requiredsubgroupSize
+dEQP-VK.subgroups.ballot_other.compute.subgroupballotfindmsb_requiredsubgroupsize
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupinverseballot_vertex
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupinverseballot_tess_eval
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupinverseballot_tess_control
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupballotfindmsb_tess_eval
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupballotfindmsb_tess_control
dEQP-VK.subgroups.ballot_other.framebuffer.subgroupballotfindmsb_geometry
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupinverseballot
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotbitextract
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotbitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotinclusivebitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotexclusivebitcount
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotfindlsb
+dEQP-VK.subgroups.ballot_other.ray_tracing.subgroupballotfindmsb
dEQP-VK.subgroups.arithmetic.graphics.subgroupadd_int8_t
dEQP-VK.subgroups.arithmetic.graphics.subgroupmul_int8_t
dEQP-VK.subgroups.arithmetic.graphics.subgroupmin_int8_t
dEQP-VK.subgroups.arithmetic.framebuffer.subgroupexclusivexor_bvec4_tess_eval
dEQP-VK.subgroups.arithmetic.framebuffer.subgroupexclusivexor_bvec4_tess_control
dEQP-VK.subgroups.arithmetic.framebuffer.subgroupexclusivexor_bvec4_geometry
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i8vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint8_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u8vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i16vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint16_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_ivec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_i64vec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_uint64_t
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_u64vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_f16vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_float
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_vec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_double
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_dvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupmax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivemax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveadd_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemul_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemin_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivemax_dvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bool
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec2
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec3
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupxor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusiveor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupinclusivexor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveand_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusiveor_bvec4
+dEQP-VK.subgroups.arithmetic.ray_tracing.subgroupexclusivexor_bvec4
dEQP-VK.subgroups.clustered.graphics.subgroupclusteredadd_int8_t
dEQP-VK.subgroups.clustered.graphics.subgroupclusteredmul_int8_t
dEQP-VK.subgroups.clustered.graphics.subgroupclusteredmin_int8_t
dEQP-VK.subgroups.clustered.framebuffer.subgroupclusteredxor_bvec4_tess_eval
dEQP-VK.subgroups.clustered.framebuffer.subgroupclusteredxor_bvec4_tess_control
dEQP-VK.subgroups.clustered.framebuffer.subgroupclusteredxor_bvec4_geometry
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i8vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint8_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u8vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i16vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint16_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_ivec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_i64vec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_uint64_t
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_u64vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_f16vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_float
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_vec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_double
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_dvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredadd_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmul_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmin_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredmax_dvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bool
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec2
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec3
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredand_bvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredor_bvec4
+dEQP-VK.subgroups.clustered.ray_tracing.subgroupclusteredxor_bvec4
dEQP-VK.subgroups.partitioned.graphics.subgroupadd_int8_t
dEQP-VK.subgroups.partitioned.graphics.subgroupmul_int8_t
dEQP-VK.subgroups.partitioned.graphics.subgroupmin_int8_t
dEQP-VK.subgroups.partitioned.framebuffer.subgroupexclusivexor_bvec4_tess_eval
dEQP-VK.subgroups.partitioned.framebuffer.subgroupexclusivexor_bvec4_tess_control
dEQP-VK.subgroups.partitioned.framebuffer.subgroupexclusivexor_bvec4_geometry
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i8vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint8_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u8vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i16vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint16_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_ivec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_i64vec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_uint64_t
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_u64vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_f16vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_float
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_vec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_double
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_dvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupmax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivemax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveadd_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemul_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemin_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivemax_dvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bool
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec2
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec3
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupxor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusiveor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupinclusivexor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveand_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusiveor_bvec4
+dEQP-VK.subgroups.partitioned.ray_tracing.subgroupexclusivexor_bvec4
dEQP-VK.subgroups.shuffle.graphics.subgroupshuffle_int8_t
dEQP-VK.subgroups.shuffle.graphics.subgroupshufflexor_int8_t
dEQP-VK.subgroups.shuffle.graphics.subgroupshuffleup_int8_t
dEQP-VK.subgroups.shuffle.framebuffer.subgroupshuffledown_bvec4_tess_eval
dEQP-VK.subgroups.shuffle.framebuffer.subgroupshuffledown_bvec4_tess_control
dEQP-VK.subgroups.shuffle.framebuffer.subgroupshuffledown_bvec4_geometry
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i8vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint8_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u8vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i16vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint16_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_ivec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_i64vec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_uint64_t
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_u64vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_f16vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_float
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_vec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_double
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_dvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_dvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bool
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec2
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec3
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffle_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshufflexor_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffleup_bvec4
+dEQP-VK.subgroups.shuffle.ray_tracing.subgroupshuffledown_bvec4
dEQP-VK.subgroups.quad.graphics.subgroupquadbroadcast_int8_t
dEQP-VK.subgroups.quad.graphics.subgroupquadbroadcast_nonconst_int8_t
dEQP-VK.subgroups.quad.graphics.subgroupquadswaphorizontal_int8_t
dEQP-VK.subgroups.quad.framebuffer.subgroupquadswapdiagonal_bvec4_tess_eval
dEQP-VK.subgroups.quad.framebuffer.subgroupquadswapdiagonal_bvec4_tess_control
dEQP-VK.subgroups.quad.framebuffer.subgroupquadswapdiagonal_bvec4_geometry
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i8vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint8_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u8vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i16vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint16_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_ivec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_i64vec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_uint64_t
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_u64vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_f16vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_float
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_vec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_double
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_dvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_dvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bool
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec2
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec3
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadbroadcast_nonconst_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswaphorizontal_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapvertical_bvec4
+dEQP-VK.subgroups.quad.ray_tracing.subgroupquadswapdiagonal_bvec4
dEQP-VK.subgroups.shape.graphics.clustered
dEQP-VK.subgroups.shape.graphics.quad
dEQP-VK.subgroups.shape.compute.clustered
dEQP-VK.subgroups.shape.framebuffer.quad_tess_eval
dEQP-VK.subgroups.shape.framebuffer.quad_tess_control
dEQP-VK.subgroups.shape.framebuffer.quad_geometry
+dEQP-VK.subgroups.shape.ray_tracing.clustered
+dEQP-VK.subgroups.shape.ray_tracing.quad
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.graphics.gl_subgroupeqmaskarb
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.graphics.gl_subgroupgemaskarb
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.graphics.gl_subgroupgtmaskarb
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.framebuffer.gl_subgroupltmaskarb_tess_eval
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.framebuffer.gl_subgroupltmaskarb_tess_control
dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.framebuffer.gl_subgroupltmaskarb_geometry
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupeqmaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupgemaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupgtmaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgrouplemaskarb
+dEQP-VK.subgroups.ballot_mask.ext_shader_subgroup_ballot.ray_tracing.gl_subgroupltmaskarb
dEQP-VK.subgroups.size_control.generic.subgroup_size_properties
dEQP-VK.subgroups.size_control.graphics.allow_varying_subgroup_size
dEQP-VK.subgroups.size_control.graphics.required_subgroup_size_max
dEQP-VK.subgroups.size_control.framebuffer.geometry_required_subgroup_size_min
dEQP-VK.subgroups.size_control.framebuffer.fragment_required_subgroup_size_max
dEQP-VK.subgroups.size_control.framebuffer.fragment_required_subgroup_size_min
+dEQP-VK.subgroups.size_control.ray_tracing.allow_varying_subgroup_size
+dEQP-VK.subgroups.size_control.ray_tracing.required_subgroup_size_max
+dEQP-VK.subgroups.size_control.ray_tracing.required_subgroup_size_min
dEQP-VK.ycbcr.format.g8b8g8r8_422_unorm.vertex_optimal
dEQP-VK.ycbcr.format.g8b8g8r8_422_unorm.vertex_optimal_array
dEQP-VK.ycbcr.format.g8b8g8r8_422_unorm.vertex_linear