From 7833759a419018f8eb80701a8d89d5c054728bf9 Mon Sep 17 00:00:00 2001
From: Rhys Perry
Date: Fri, 7 Jan 2022 16:05:20 +0000
Subject: [PATCH] radv: avoid providing a write_mask to intrinsic builders

Signed-off-by: Rhys Perry
Reviewed-by: Emma Anholt
Part-of:
---
 src/amd/vulkan/radv_acceleration_structure.c | 63 +++++++++++++---------------
 src/amd/vulkan/radv_meta_buffer.c            |  7 ++--
 src/amd/vulkan/radv_meta_clear.c             |  3 +-
 src/amd/vulkan/radv_meta_copy_vrs_htile.c    |  8 ++--
 src/amd/vulkan/radv_pipeline_rt.c            | 17 ++++----
 src/amd/vulkan/radv_query.c                  | 30 ++++++-------
 6 files changed, 56 insertions(+), 72 deletions(-)

diff --git a/src/amd/vulkan/radv_acceleration_structure.c b/src/amd/vulkan/radv_acceleration_structure.c
index edc3156..1c9a189 100644
--- a/src/amd/vulkan/radv_acceleration_structure.c
+++ b/src/amd/vulkan/radv_acceleration_structure.c
@@ -1012,12 +1012,11 @@ build_leaf_shader(struct radv_device *dev)
       for (unsigned i = 0; i < 4; ++i) {
          nir_build_store_global(&b, nir_vec(&b, node_data + i * 4, 4),
                                 nir_iadd(&b, triangle_node_dst_addr, nir_imm_int64(&b, i * 16)),
-                                .write_mask = 15, .align_mul = 16, .align_offset = 0);
+                                .align_mul = 16, .align_offset = 0);
       }
 
       nir_ssa_def *node_id = nir_ushr(&b, node_offset, nir_imm_int(&b, 3));
-      nir_build_store_global(&b, node_id, scratch_addr, .write_mask = 1, .align_mul = 4,
-                             .align_offset = 0);
+      nir_build_store_global(&b, node_id, scratch_addr, .align_mul = 4, .align_offset = 0);
    }
    nir_push_else(&b, NULL);
    nir_push_if(&b, nir_ieq(&b, geom_type, nir_imm_int(&b, VK_GEOMETRY_TYPE_AABBS_KHR)));
@@ -1030,8 +1029,7 @@ build_leaf_shader(struct radv_device *dev)
       nir_ssa_def *aabb_node_dst_addr = nir_iadd(&b, node_dst_addr, nir_u2u64(&b, node_offset));
       nir_ssa_def *node_id =
          nir_iadd(&b, nir_ushr(&b, node_offset, nir_imm_int(&b, 3)), nir_imm_int(&b, 7));
-      nir_build_store_global(&b, node_id, scratch_addr, .write_mask = 1, .align_mul = 4,
-                             .align_offset = 0);
+      nir_build_store_global(&b, node_id, scratch_addr, .align_mul = 4, .align_offset = 0);
 
       aabb_addr = nir_iadd(&b, aabb_addr, nir_u2u64(&b, nir_imul(&b, aabb_stride, global_id)));
 
@@ -1053,10 +1051,10 @@ build_leaf_shader(struct radv_device *dev)
 
       nir_build_store_global(&b, nir_vec(&b, values + 0, 4),
                              nir_iadd(&b, aabb_node_dst_addr, nir_imm_int64(&b, 0)),
-                             .write_mask = 15, .align_mul = 16, .align_offset = 0);
+                             .align_mul = 16, .align_offset = 0);
       nir_build_store_global(&b, nir_vec(&b, values + 4, 4),
                              nir_iadd(&b, aabb_node_dst_addr, nir_imm_int64(&b, 16)),
-                             .write_mask = 15, .align_mul = 16, .align_offset = 0);
+                             .align_mul = 16, .align_offset = 0);
    }
    nir_push_else(&b, NULL);
    { /* Instances */
@@ -1096,8 +1094,7 @@ build_leaf_shader(struct radv_device *dev)
       node_dst_addr = nir_iadd(&b, node_dst_addr, nir_u2u64(&b, node_offset));
       nir_ssa_def *node_id =
          nir_iadd(&b, nir_ushr(&b, node_offset, nir_imm_int(&b, 3)), nir_imm_int(&b, 6));
-      nir_build_store_global(&b, node_id, scratch_addr, .write_mask = 1, .align_mul = 4,
-                             .align_offset = 0);
+      nir_build_store_global(&b, node_id, scratch_addr, .align_mul = 4, .align_offset = 0);
 
       nir_variable *bounds[2] = {
          nir_variable_create(b.shader, nir_var_shader_temp, vec3_type, "min_bound"),
         nir_variable_create(b.shader, nir_var_shader_temp, vec3_type, "max_bound"),
@@ -1144,7 +1141,7 @@ build_leaf_shader(struct radv_device *dev)
 
          nir_build_store_global(&b, nir_vec(&b, vals, 3),
                                 nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 92 + 12 * i)),
-                                .write_mask = 0x7, .align_mul = 4, .align_offset = 0);
+                                .align_mul = 4, .align_offset = 0);
       }
 
       nir_ssa_def *m_in[3][3], *m_out[3][3], *m_vec[3][4];
@@ -1161,7 +1158,7 @@ build_leaf_shader(struct radv_device *dev)
       for (unsigned i = 0; i < 3; ++i) {
          nir_build_store_global(&b, nir_vec(&b, m_vec[i], 4),
                                 nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 16 + 16 * i)),
-                                .write_mask = 0xf, .align_mul = 4, .align_offset = 0);
+                                .align_mul = 4, .align_offset = 0);
       }
 
       nir_ssa_def *out0[4] = {
@@ -1169,17 +1166,17 @@ build_leaf_shader(struct radv_device *dev)
          nir_channel(&b, nir_unpack_64_2x32(&b, header_addr), 1), nir_channel(&b, inst3, 0),
          nir_channel(&b, inst3, 1)};
       nir_build_store_global(&b, nir_vec(&b, out0, 4),
-                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)), .write_mask = 0xf,
-                             .align_mul = 4, .align_offset = 0);
+                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)), .align_mul = 4,
+                             .align_offset = 0);
       nir_build_store_global(&b, global_id, nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 88)),
-                             .write_mask = 0x1, .align_mul = 4, .align_offset = 0);
+                             .align_mul = 4, .align_offset = 0);
       nir_pop_if(&b, NULL);
       nir_build_store_global(&b, nir_load_var(&b, bounds[0]),
-                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 64)), .write_mask = 0x7,
-                             .align_mul = 4, .align_offset = 0);
+                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 64)), .align_mul = 4,
+                             .align_offset = 0);
       nir_build_store_global(&b, nir_load_var(&b, bounds[1]),
-                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 76)), .write_mask = 0x7,
-                             .align_mul = 4, .align_offset = 0);
+                             nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 76)), .align_mul = 4,
+                             .align_offset = 0);
    }
    nir_pop_if(&b, NULL);
    nir_pop_if(&b, NULL);
@@ -1307,7 +1304,7 @@ build_internal_shader(struct radv_device *dev)
                              .align_mul = 4, .align_offset = 0);
 
    nir_build_store_global(&b, src_nodes, nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 0)),
-                          .write_mask = 0xf, .align_mul = 4, .align_offset = 0);
+                          .align_mul = 4, .align_offset = 0);
 
    nir_ssa_def *total_bounds[2] = {
       nir_channels(&b, nir_imm_vec4(&b, NAN, NAN, NAN, NAN), 7),
@@ -1327,10 +1324,10 @@ build_internal_shader(struct radv_device *dev)
       nir_pop_if(&b, NULL);
       nir_build_store_global(&b, nir_load_var(&b, bounds[0]),
                              nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 16 + 24 * i)),
-                             .write_mask = 0x7, .align_mul = 4, .align_offset = 0);
+                             .align_mul = 4, .align_offset = 0);
       nir_build_store_global(&b, nir_load_var(&b, bounds[1]),
                              nir_iadd(&b, node_dst_addr, nir_imm_int64(&b, 28 + 24 * i)),
-                             .write_mask = 0x7, .align_mul = 4, .align_offset = 0);
+                             .align_mul = 4, .align_offset = 0);
       total_bounds[0] = nir_fmin(&b, total_bounds[0], nir_load_var(&b, bounds[0]));
       total_bounds[1] = nir_fmax(&b, total_bounds[1], nir_load_var(&b, bounds[1]));
    }
@@ -1340,16 +1337,14 @@ build_internal_shader(struct radv_device *dev)
    nir_ssa_def *dst_scratch_addr = nir_iadd(
       &b, scratch_addr,
      nir_u2u64(&b, nir_iadd(&b, dst_scratch_offset, nir_ishl(&b, global_id, nir_imm_int(&b, 2)))));
-   nir_build_store_global(&b, node_id, dst_scratch_addr, .write_mask = 1, .align_mul = 4,
-                          .align_offset = 0);
+   nir_build_store_global(&b, node_id, dst_scratch_addr, .align_mul = 4, .align_offset = 0);
 
    nir_push_if(&b, fill_header);
-   nir_build_store_global(&b, node_id, node_addr, .write_mask = 1, .align_mul = 4,
-                          .align_offset = 0);
+   nir_build_store_global(&b, node_id, node_addr, .align_mul = 4, .align_offset = 0);
    nir_build_store_global(&b, total_bounds[0], nir_iadd(&b, node_addr, nir_imm_int64(&b, 8)),
-                          .write_mask = 7, .align_mul = 4, .align_offset = 0);
+                          .align_mul = 4, .align_offset = 0);
    nir_build_store_global(&b, total_bounds[1], nir_iadd(&b, node_addr, nir_imm_int64(&b, 20)),
-                          .write_mask = 7, .align_mul = 4, .align_offset = 0);
+                          .align_mul = 4, .align_offset = 0);
    nir_pop_if(&b, NULL);
    return b.shader;
 }
@@ -1452,19 +1447,19 @@ build_copy_shader(struct radv_device *dev)
             nir_iadd(&b, dst_base_addr,
                      nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
                                                 serialization_size))),
-            .write_mask = 0x1, .align_mul = 8, .align_offset = 0);
+            .align_mul = 8, .align_offset = 0);
          nir_build_store_global(
             &b, compacted_size,
             nir_iadd(&b, dst_base_addr,
                      nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
                                                 compacted_size))),
-            .write_mask = 0x1, .align_mul = 8, .align_offset = 0);
+            .align_mul = 8, .align_offset = 0);
          nir_build_store_global(
             &b, nir_u2u64(&b, instance_count),
             nir_iadd(&b, dst_base_addr,
                      nir_imm_int64(&b, offsetof(struct radv_accel_struct_serialization_header,
                                                 instance_count))),
-            .write_mask = 0x1, .align_mul = 8, .align_offset = 0);
+            .align_mul = 8, .align_offset = 0);
       }
       nir_pop_if(&b, NULL);
    }
@@ -1567,8 +1562,8 @@ build_copy_shader(struct radv_device *dev)
                      nir_imm_int(&b, sizeof(struct radv_accel_struct_serialization_header)));
          instance_addr = nir_iadd(&b, dst_base_addr, nir_u2u64(&b, instance_addr));
 
-         nir_build_store_global(&b, nir_channels(&b, value, 3), instance_addr,
-                                .write_mask = 3, .align_mul = 8, .align_offset = 0);
+         nir_build_store_global(&b, nir_channels(&b, value, 3), instance_addr, .align_mul = 8,
+                                .align_offset = 0);
       }
       nir_push_else(&b, NULL);
       {
@@ -1597,8 +1592,8 @@ build_copy_shader(struct radv_device *dev)
 
          nir_store_var(&b, offset_var, nir_iadd(&b, offset, increment), 1);
 
-         nir_build_store_global(&b, nir_load_var(&b, value_var), dst_addr, .write_mask = 0xf,
-                                .align_mul = 16, .align_offset = 0);
+         nir_build_store_global(&b, nir_load_var(&b, value_var), dst_addr, .align_mul = 16,
+                                .align_offset = 0);
       }
       nir_push_else(&b, NULL);
       {
diff --git a/src/amd/vulkan/radv_meta_buffer.c b/src/amd/vulkan/radv_meta_buffer.c
index 8fc28e6..c239980 100644
--- a/src/amd/vulkan/radv_meta_buffer.c
+++ b/src/amd/vulkan/radv_meta_buffer.c
@@ -20,8 +20,8 @@ build_buffer_fill_shader(struct radv_device *dev)
    nir_ssa_def *load = nir_load_push_constant(&b, 1, 32, nir_imm_int(&b, 0), .range = 4);
    nir_ssa_def *swizzled_load = nir_swizzle(&b, load, (unsigned[]){0, 0, 0, 0}, 4);
 
-   nir_store_ssbo(&b, swizzled_load, dst_buf, offset, .write_mask = 0xf,
-                  .access = ACCESS_NON_READABLE, .align_mul = 16);
+   nir_store_ssbo(&b, swizzled_load, dst_buf, offset, .access = ACCESS_NON_READABLE,
+                  .align_mul = 16);
 
    return b.shader;
 }
@@ -41,8 +41,7 @@ build_buffer_copy_shader(struct radv_device *dev)
    nir_ssa_def *src_buf = radv_meta_load_descriptor(&b, 0, 1);
 
    nir_ssa_def *load = nir_load_ssbo(&b, 4, 32, src_buf, offset, .align_mul = 16);
-   nir_store_ssbo(&b, load, dst_buf, offset, .write_mask = 0xf, .access = ACCESS_NON_READABLE,
-                  .align_mul = 16);
+   nir_store_ssbo(&b, load, dst_buf, offset, .access = ACCESS_NON_READABLE, .align_mul = 16);
 
    return b.shader;
 }
diff --git a/src/amd/vulkan/radv_meta_clear.c b/src/amd/vulkan/radv_meta_clear.c
index 3a85fe2..e130883 100644
--- a/src/amd/vulkan/radv_meta_clear.c
+++ b/src/amd/vulkan/radv_meta_clear.c
@@ -1076,8 +1076,7 @@ build_clear_htile_mask_shader()
    nir_ssa_def *data = nir_iand(&b, load, nir_channel(&b, constants, 1));
    data = nir_ior(&b, data, nir_channel(&b, constants, 0));
 
-   nir_store_ssbo(&b, data, buf, offset, .write_mask = 0xf, .access = ACCESS_NON_READABLE,
-                  .align_mul = 16);
+   nir_store_ssbo(&b, data, buf, offset, .access = ACCESS_NON_READABLE, .align_mul = 16);
 
    return b.shader;
 }
diff --git a/src/amd/vulkan/radv_meta_copy_vrs_htile.c b/src/amd/vulkan/radv_meta_copy_vrs_htile.c
index a5f3dd7..b50a92f 100644
--- a/src/amd/vulkan/radv_meta_copy_vrs_htile.c
+++ b/src/amd/vulkan/radv_meta_copy_vrs_htile.c
@@ -99,10 +99,10 @@ build_copy_vrs_htile_shader(struct radv_device *device, struct radeon_surf *surf
     * VRS rate X = min(value >> 2, 1)
     * VRS rate Y = min(value & 3, 1)
     */
-   nir_ssa_def *x_rate = nir_ushr(&b, &tex->dest.ssa, nir_imm_int(&b, 2));
+   nir_ssa_def *x_rate = nir_ushr(&b, nir_channel(&b, &tex->dest.ssa, 0), nir_imm_int(&b, 2));
    x_rate = nir_umin(&b, x_rate, nir_imm_int(&b, 1));
 
-   nir_ssa_def *y_rate = nir_iand(&b, &tex->dest.ssa, nir_imm_int(&b, 3));
+   nir_ssa_def *y_rate = nir_iand(&b, nir_channel(&b, &tex->dest.ssa, 0), nir_imm_int(&b, 3));
    y_rate = nir_umin(&b, y_rate, nir_imm_int(&b, 1));
 
    /* Compute the final VRS rate. */
@@ -133,8 +133,8 @@ build_copy_vrs_htile_shader(struct radv_device *device, struct radeon_surf *surf
    nir_ssa_def *output_value = nir_ior(&b, nir_load_var(&b, htile_value), vrs_rates);
 
    /* Store the updated HTILE 32-bit which contains the VRS rates. */
-   nir_store_ssbo(&b, output_value, htile_buf, htile_addr, .write_mask = 0x1,
-                  .access = ACCESS_NON_READABLE, .align_mul = 4);
+   nir_store_ssbo(&b, output_value, htile_buf, htile_addr, .access = ACCESS_NON_READABLE,
+                  .align_mul = 4);
 
    return b.shader;
 }
diff --git a/src/amd/vulkan/radv_pipeline_rt.c b/src/amd/vulkan/radv_pipeline_rt.c
index 35cede5..6a97afc 100644
--- a/src/amd/vulkan/radv_pipeline_rt.c
+++ b/src/amd/vulkan/radv_pipeline_rt.c
@@ -422,8 +422,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
                                          nir_imm_int(&b_shader, size)),
                           1);
 
            nir_store_scratch(&b_shader, nir_imm_int(&b_shader, ret),
-                              nir_load_var(&b_shader, vars->stack_ptr), .align_mul = 16,
-                              .write_mask = 1);
+                              nir_load_var(&b_shader, vars->stack_ptr), .align_mul = 16);
            nir_store_var(&b_shader, vars->stack_ptr,
                          nir_iadd(&b_shader, nir_load_var(&b_shader, vars->stack_ptr),
@@ -449,8 +448,7 @@ lower_rt_instructions(nir_shader *shader, struct rt_variables *vars, unsigned ca
                                          nir_imm_int(&b_shader, size)),
                           1);
 
            nir_store_scratch(&b_shader, nir_imm_int(&b_shader, ret),
-                              nir_load_var(&b_shader, vars->stack_ptr), .align_mul = 16,
-                              .write_mask = 1);
+                              nir_load_var(&b_shader, vars->stack_ptr), .align_mul = 16);
            nir_store_var(&b_shader, vars->stack_ptr,
                          nir_iadd(&b_shader, nir_load_var(&b_shader, vars->stack_ptr),
@@ -1291,7 +1289,7 @@ insert_traversal_triangle_case(struct radv_device *device,
          nir_store_scratch(
             b, ij,
             nir_iadd(b, nir_load_var(b, vars->stack_ptr), nir_imm_int(b, RADV_HIT_ATTRIB_OFFSET)),
-            .align_mul = 16, .write_mask = 3);
+            .align_mul = 16);
 
          nir_store_var(b, vars->ahit_status, nir_imm_int(b, 0), 1);
 
@@ -1845,8 +1843,8 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
       nir_store_var(b, trav_vars.instance_addr, nir_imm_int64(b, 0), 1);
 
       nir_store_var(b, trav_vars.stack, nir_iadd(b, stack_base, stack_entry_stride_def), 1);
-      nir_store_shared(b, bvh_root, stack_base, .base = 0, .write_mask = 0x1,
-                       .align_mul = stack_entry_size, .align_offset = 0);
+      nir_store_shared(b, bvh_root, stack_base, .base = 0, .align_mul = stack_entry_size,
+                       .align_offset = 0);
 
      nir_store_var(b, trav_vars.top_stack, nir_imm_int(b, 0), 1);
@@ -1931,7 +1929,7 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
                        1);
 
          nir_store_shared(b, nir_iand(b, nir_channel(b, instance_data, 0), nir_imm_int(b, 63)),
-                          nir_load_var(b, trav_vars.stack), .base = 0, .write_mask = 0x1,
+                          nir_load_var(b, trav_vars.stack), .base = 0,
                           .align_mul = stack_entry_size, .align_offset = 0);
          nir_store_var(b, trav_vars.stack,
                        nir_iadd(b, nir_load_var(b, trav_vars.stack), stack_entry_stride_def),
@@ -1971,8 +1969,7 @@ insert_traversal(struct radv_device *device, const VkRayTracingPipelineCreateInf
          nir_push_if(b, nir_ine(b, new_node, nir_imm_int(b, 0xffffffff)));
          {
             nir_store_shared(b, new_node, nir_load_var(b, trav_vars.stack), .base = 0,
-                             .write_mask = 0x1, .align_mul = stack_entry_size,
-                             .align_offset = 0);
+                             .align_mul = stack_entry_size, .align_offset = 0);
             nir_store_var(
                b, trav_vars.stack,
                nir_iadd(b, nir_load_var(b, trav_vars.stack), stack_entry_stride_def), 1);
diff --git a/src/amd/vulkan/radv_query.c b/src/amd/vulkan/radv_query.c
index d2558d2..b48eb8d 100644
--- a/src/amd/vulkan/radv_query.c
+++ b/src/amd/vulkan/radv_query.c
@@ -63,12 +63,11 @@ radv_store_availability(nir_builder *b, nir_ssa_def *flags, nir_ssa_def *dst_buf
 
    nir_push_if(b, nir_test_flag(b, flags, VK_QUERY_RESULT_64_BIT));
 
-   nir_store_ssbo(b, nir_vec2(b, value32, nir_imm_int(b, 0)), dst_buf, offset, .write_mask = 0x3,
-                  .align_mul = 8);
+   nir_store_ssbo(b, nir_vec2(b, value32, nir_imm_int(b, 0)), dst_buf, offset, .align_mul = 8);
 
    nir_push_else(b, NULL);
 
-   nir_store_ssbo(b, value32, dst_buf, offset, .write_mask = 0x1, .align_mul = 4);
+   nir_store_ssbo(b, value32, dst_buf, offset, .align_mul = 4);
 
    nir_pop_if(b, NULL);
 
@@ -191,13 +190,12 @@ build_occlusion_query_shader(struct radv_device *device)
 
    nir_push_if(&b, result_is_64bit);
 
-   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x1,
-                  .align_mul = 8);
+   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .align_mul = 8);
 
    nir_push_else(&b, NULL);
 
    nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
-                  .write_mask = 0x1, .align_mul = 8);
+                  .align_mul = 8);
 
    nir_pop_if(&b, NULL);
    nir_pop_if(&b, NULL);
@@ -305,13 +303,12 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
 
       /* Store result */
       nir_push_if(&b, result_is_64bit);
 
-      nir_store_ssbo(&b, result, dst_buf, nir_load_var(&b, output_offset), .write_mask = 0x1,
-                     .align_mul = 8);
+      nir_store_ssbo(&b, result, dst_buf, nir_load_var(&b, output_offset), .align_mul = 8);
 
       nir_push_else(&b, NULL);
 
       nir_store_ssbo(&b, nir_u2u32(&b, result), dst_buf, nir_load_var(&b, output_offset),
-                     .write_mask = 0x1, .align_mul = 4);
+                     .align_mul = 4);
 
       nir_pop_if(&b, NULL);
@@ -338,12 +335,11 @@ build_pipeline_statistics_query_shader(struct radv_device *device)
      nir_ssa_def *output_elem = nir_iadd(&b, output_base, nir_imul(&b, elem_size, current_counter));
 
      nir_push_if(&b, result_is_64bit);
 
-     nir_store_ssbo(&b, nir_imm_int64(&b, 0), dst_buf, output_elem, .write_mask = 0x1,
-                    .align_mul = 8);
+     nir_store_ssbo(&b, nir_imm_int64(&b, 0), dst_buf, output_elem, .align_mul = 8);
 
      nir_push_else(&b, NULL);
 
-     nir_store_ssbo(&b, nir_imm_int(&b, 0), dst_buf, output_elem, .write_mask = 0x1, .align_mul = 4);
+     nir_store_ssbo(&b, nir_imm_int(&b, 0), dst_buf, output_elem, .align_mul = 4);
 
      nir_pop_if(&b, NULL);
@@ -464,13 +460,12 @@ build_tfb_query_shader(struct radv_device *device)
    /* Store result. */
    nir_push_if(&b, result_is_64bit);
 
-   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x3,
-                  .align_mul = 8);
+   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .align_mul = 8);
 
    nir_push_else(&b, NULL);
 
    nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
-                  .write_mask = 0x3, .align_mul = 4);
+                  .align_mul = 4);
 
    nir_pop_if(&b, NULL);
    nir_pop_if(&b, NULL);
@@ -571,13 +566,12 @@ build_timestamp_query_shader(struct radv_device *device)
    /* Store result. */
    nir_push_if(&b, result_is_64bit);
 
-   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .write_mask = 0x1,
-                  .align_mul = 8);
+   nir_store_ssbo(&b, nir_load_var(&b, result), dst_buf, output_base, .align_mul = 8);
 
    nir_push_else(&b, NULL);
 
    nir_store_ssbo(&b, nir_u2u32(&b, nir_load_var(&b, result)), dst_buf, output_base,
-                  .write_mask = 0x1, .align_mul = 4);
+                  .align_mul = 4);
 
    nir_pop_if(&b, NULL);
 
-- 
2.7.4
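
Note on the change (illustrative, not part of the patch): the cleanup relies on
the NIR intrinsic builders defaulting the write mask to cover every component
of the value being stored when the optional .write_mask argument is omitted, so
dropping masks such as 0xf for a vec4 store or 0x1 for a scalar store does not
change the emitted instructions. A minimal sketch of that equivalence, assuming
a Mesa tree where nir_builder.h is available (the helper name
store_vec4_example is made up for illustration):

  #include "nir_builder.h"

  static void
  store_vec4_example(nir_builder *b, nir_ssa_def *value, nir_ssa_def *dst_buf,
                     nir_ssa_def *offset)
  {
     /* Explicit mask naming all four components (pre-patch style). */
     nir_store_ssbo(b, value, dst_buf, offset, .write_mask = 0xf, .align_mul = 16);

     /* Mask omitted; the builder defaults it to cover every component of
      * 'value' (post-patch style). */
     nir_store_ssbo(b, value, dst_buf, offset, .align_mul = 16);
  }

This is also why the radv_meta_copy_vrs_htile.c hunks take nir_channel(..., 0)
of the texture result before the shift and mask: once the explicit 0x1 mask is
gone, the stored value has to be a single component so that the defaulted mask
still writes only one HTILE dword.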