From 28d02b9d3e7b23146ac8bb28f11c797184638b5c Mon Sep 17 00:00:00 2001
From: James Park
Date: Tue, 13 Oct 2020 21:48:25 -0700
Subject: [PATCH] ac,amd/llvm,radv: Initialize structs with {0}

Necessary to compile with MSVC.

Reviewed-by: Samuel Pitoiset
Part-of:
---
 src/amd/common/ac_debug.c                     |  2 +-
 src/amd/common/ac_gpu_info.c                  | 14 +++++-----
 src/amd/common/ac_rtld.c                      |  4 +--
 src/amd/common/ac_surface.c                   |  8 +++---
 src/amd/llvm/ac_llvm_build.c                  |  8 +++---
 src/amd/llvm/ac_nir_to_llvm.c                 | 10 +++----
 src/amd/vulkan/layers/radv_sqtt_layer.c       | 20 +++++++-------
 src/amd/vulkan/radv_cmd_buffer.c              | 40 +++++++++++++--------------
 src/amd/vulkan/radv_debug.c                   |  2 +-
 src/amd/vulkan/radv_meta_clear.c              |  2 +-
 src/amd/vulkan/radv_meta_decompress.c         |  4 +--
 src/amd/vulkan/radv_meta_fast_clear.c         |  4 +--
 src/amd/vulkan/radv_meta_resolve.c            |  6 ++--
 src/amd/vulkan/radv_meta_resolve_cs.c         |  2 +-
 src/amd/vulkan/radv_nir_to_llvm.c             | 36 ++++++++++++------------
 src/amd/vulkan/radv_pipeline.c                |  6 ++--
 src/amd/vulkan/radv_rgp.c                     | 12 ++++----
 src/amd/vulkan/radv_shader.c                  |  8 +++---
 src/amd/vulkan/radv_sqtt.c                    |  2 +-
 src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c |  2 +-
 20 files changed, 96 insertions(+), 96 deletions(-)

diff --git a/src/amd/common/ac_debug.c b/src/amd/common/ac_debug.c
index bbaed82..246a89e 100644
--- a/src/amd/common/ac_debug.c
+++ b/src/amd/common/ac_debug.c
@@ -573,7 +573,7 @@ void ac_parse_ib_chunk(FILE *f, uint32_t *ib_ptr, int num_dw, const int *trace_i
                        unsigned trace_id_count, enum chip_class chip_class,
                        ac_debug_addr_callback addr_callback, void *addr_callback_data)
 {
-   struct ac_ib_parser ib = {};
+   struct ac_ib_parser ib = {0};
    ib.ib = ib_ptr;
    ib.num_dw = num_dw;
    ib.trace_ids = trace_ids;
diff --git a/src/amd/common/ac_gpu_info.c b/src/amd/common/ac_gpu_info.c
index 40db254..27b2c52 100644
--- a/src/amd/common/ac_gpu_info.c
+++ b/src/amd/common/ac_gpu_info.c
@@ -178,12 +178,12 @@ has_tmz_support(amdgpu_device_handle dev,
 bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
                        struct amdgpu_gpu_info *amdinfo)
 {
-   struct drm_amdgpu_info_device device_info = {};
-   struct amdgpu_buffer_size_alignments alignment_info = {};
-   struct drm_amdgpu_info_hw_ip dma = {}, compute = {}, uvd = {};
-   struct drm_amdgpu_info_hw_ip uvd_enc = {}, vce = {}, vcn_dec = {}, vcn_jpeg = {};
-   struct drm_amdgpu_info_hw_ip vcn_enc = {}, gfx = {};
-   struct amdgpu_gds_resource_info gds = {};
+   struct drm_amdgpu_info_device device_info = {0};
+   struct amdgpu_buffer_size_alignments alignment_info = {0};
+   struct drm_amdgpu_info_hw_ip dma = {0}, compute = {0}, uvd = {0};
+   struct drm_amdgpu_info_hw_ip uvd_enc = {0}, vce = {0}, vcn_dec = {0}, vcn_jpeg = {0};
+   struct drm_amdgpu_info_hw_ip vcn_enc = {0}, gfx = {0};
+   struct amdgpu_gds_resource_info gds = {0};
    uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
    int r, i, j;
    amdgpu_device_handle dev = dev_p;
@@ -331,7 +331,7 @@ bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
    }
 
    if (info->drm_minor >= 9) {
-      struct drm_amdgpu_memory_info meminfo = {};
+      struct drm_amdgpu_memory_info meminfo = {0};
 
       r = amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
       if (r) {
diff --git a/src/amd/common/ac_rtld.c b/src/amd/common/ac_rtld.c
index 8a9cd7c..4b54da4 100644
--- a/src/amd/common/ac_rtld.c
+++ b/src/amd/common/ac_rtld.c
@@ -202,7 +202,7 @@ static bool read_private_lds_symbols(struct ac_rtld_binary *binary, unsigned par
    size_t num_symbols = symbols_data->d_size / sizeof(Elf64_Sym);
 
    for (size_t j = 0; j < num_symbols; ++j, ++symbol) {
-      struct ac_rtld_symbol s = {};
+      struct ac_rtld_symbol s = {0};
 
       if (ELF64_ST_TYPE(symbol->st_info) == STT_AMDGPU_LDS) {
          /* old-style LDS symbols from initial prototype -- remove eventually */
@@ -520,7 +520,7 @@ bool ac_rtld_read_config(const struct radeon_info *info, struct ac_rtld_binary *
       return false;
 
    /* TODO: be precise about scratch use? */
-   struct ac_shader_config c = {};
+   struct ac_shader_config c = {0};
    ac_parse_shader_binary_config(config_data, config_nbytes, binary->wave_size, true, info, &c);
 
    config->num_sgprs = MAX2(config->num_sgprs, c.num_sgprs);
diff --git a/src/amd/common/ac_surface.c b/src/amd/common/ac_surface.c
index 10648d1..9eddd3c 100644
--- a/src/amd/common/ac_surface.c
+++ b/src/amd/common/ac_surface.c
@@ -185,7 +185,7 @@ ac_compute_dcc_retile_tile_indices(struct ac_addrlib *addrlib, const struct rade
    if (!indices)
       return NULL;
 
-   ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {};
+   ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {0};
    addrout.size = sizeof(addrout);
 
    for (unsigned y = 0; y < h; ++y) {
@@ -1118,7 +1118,7 @@ static int gfx6_compute_surface(ADDR_HANDLE addrlib, const struct radeon_info *i
        !(surf->flags & RADEON_SURF_NO_FMASK)) {
       ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
       ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
-      ADDR_TILEINFO fmask_tile_info = {};
+      ADDR_TILEINFO fmask_tile_info = {0};
 
       fin.size = sizeof(fin);
       fout.size = sizeof(fout);
@@ -1365,7 +1365,7 @@ static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_
                                 const struct ac_surf_config *config, struct radeon_surf *surf,
                                 bool compressed, ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
 {
-   ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {};
+   ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {0};
    ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
    ADDR_E_RETURNCODE ret;
 
@@ -1500,7 +1500,7 @@ static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_
                                  !in->flags.metaPipeUnaligned))) {
       ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
       ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
-      ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {};
+      ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
 
       din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
       dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
diff --git a/src/amd/llvm/ac_llvm_build.c b/src/amd/llvm/ac_llvm_build.c
index 44ebb01..e0ddf45 100644
--- a/src/amd/llvm/ac_llvm_build.c
+++ b/src/amd/llvm/ac_llvm_build.c
@@ -2701,7 +2701,7 @@ static bool ac_eliminate_const_output(uint8_t *vs_output_param_offset, uint32_t
                                       struct ac_vs_exp_inst *exp)
 {
    unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
-   bool is_zero[4] = {}, is_one[4] = {};
+   bool is_zero[4] = {0}, is_one[4] = {0};
 
    for (i = 0; i < 4; i++) {
       /* It's a constant expression. Undef outputs are eliminated too. */
@@ -3268,7 +3268,7 @@ LLVMValueRef ac_unpack_param(struct ac_llvm_context *ctx, LLVMValueRef param, un
 void ac_apply_fmask_to_sample(struct ac_llvm_context *ac, LLVMValueRef fmask,
                               LLVMValueRef *addr, bool is_array_tex)
 {
-   struct ac_image_args fmask_load = {};
+   struct ac_image_args fmask_load = {0};
    fmask_load.opcode = ac_image_load;
    fmask_load.resource = fmask;
    fmask_load.dmask = 0xf;
@@ -4486,11 +4486,11 @@ void ac_build_sendmsg_gs_alloc_req(struct ac_llvm_context *ctx, LLVMValueRef wav
    ac_build_sendmsg(ctx, AC_SENDMSG_GS_ALLOC_REQ, tmp);
 
    if (export_dummy_prim) {
-      struct ac_ngg_prim prim = {};
+      struct ac_ngg_prim prim = {0};
       /* The vertex indices are 0,0,0. */
       prim.passthrough = ctx->i32_0;
 
-      struct ac_export_args pos = {};
+      struct ac_export_args pos = {0};
       pos.out[0] = pos.out[1] = pos.out[2] = pos.out[3] = ctx->f32_0;
       pos.target = V_008DFC_SQ_EXP_POS;
       pos.enabled_channels = 0xf;
diff --git a/src/amd/llvm/ac_nir_to_llvm.c b/src/amd/llvm/ac_nir_to_llvm.c
index 2ec2ad0..07db0c6 100644
--- a/src/amd/llvm/ac_nir_to_llvm.c
+++ b/src/amd/llvm/ac_nir_to_llvm.c
@@ -1274,7 +1274,7 @@ static LLVMValueRef lower_gather4_integer(struct ac_llvm_context *ctx, nir_varia
       assert(!wa_8888);
       half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
    } else {
-      struct ac_image_args resinfo = {};
+      struct ac_image_args resinfo = {0};
       LLVMBasicBlockRef bbs[2];
 
       LLVMValueRef unnorm = NULL;
@@ -2385,7 +2385,7 @@ static LLVMValueRef visit_image_load(struct ac_nir_context *ctx, const nir_intri
    struct waterfall_context wctx;
    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
 
-   struct ac_image_args args = {};
+   struct ac_image_args args = {0};
 
    args.cache_policy = get_cache_policy(ctx, access, false, false);
 
@@ -2455,7 +2455,7 @@ static void visit_image_store(struct ac_nir_context *ctx, const nir_intrinsic_in
    LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
 
    bool writeonly_memory = access & ACCESS_NON_READABLE;
-   struct ac_image_args args = {};
+   struct ac_image_args args = {0};
 
    args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);
 
@@ -2627,7 +2627,7 @@ static LLVMValueRef visit_image_atomic(struct ac_nir_context *ctx, const nir_int
       assert(length < sizeof(intrinsic_name));
       result = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32, params, param_count, 0);
    } else {
-      struct ac_image_args args = {};
+      struct ac_image_args args = {0};
       args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
       args.atomic = atomic_subop;
       args.data[0] = params[0];
@@ -4771,7 +4771,7 @@ static void setup_shared(struct ac_nir_context *ctx, struct nir_shader *nir)
 void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
                       const struct ac_shader_args *args, struct nir_shader *nir)
 {
-   struct ac_nir_context ctx = {};
+   struct ac_nir_context ctx = {0};
    struct nir_function *func;
 
    ctx.ac = *ac;
diff --git a/src/amd/vulkan/layers/radv_sqtt_layer.c b/src/amd/vulkan/layers/radv_sqtt_layer.c
index 5f3de07..78b9dea 100644
--- a/src/amd/vulkan/layers/radv_sqtt_layer.c
+++ b/src/amd/vulkan/layers/radv_sqtt_layer.c
@@ -355,7 +355,7 @@ static void
 radv_write_begin_general_api_marker(struct radv_cmd_buffer *cmd_buffer,
                                     enum rgp_sqtt_marker_general_api_type api_type)
 {
-   struct rgp_sqtt_marker_general_api marker = {};
+   struct rgp_sqtt_marker_general_api marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_GENERAL_API;
@@ -368,7 +368,7 @@ static void
 radv_write_end_general_api_marker(struct radv_cmd_buffer *cmd_buffer,
                                   enum rgp_sqtt_marker_general_api_type api_type)
 {
-   struct rgp_sqtt_marker_general_api marker = {};
+   struct rgp_sqtt_marker_general_api marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_GENERAL_API;
@@ -385,7 +385,7 @@ radv_write_event_marker(struct radv_cmd_buffer *cmd_buffer,
                         uint32_t instance_offset_user_data,
                         uint32_t draw_index_user_data)
 {
-   struct rgp_sqtt_marker_event marker = {};
+   struct rgp_sqtt_marker_event marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_EVENT;
@@ -414,7 +414,7 @@ radv_write_event_with_dims_marker(struct radv_cmd_buffer *cmd_buffer,
                                   enum rgp_sqtt_marker_event_type api_type,
                                   uint32_t x, uint32_t y, uint32_t z)
 {
-   struct rgp_sqtt_marker_event_with_dims marker = {};
+   struct rgp_sqtt_marker_event_with_dims marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    marker.event.identifier = RGP_SQTT_MARKER_IDENTIFIER_EVENT;
@@ -434,7 +434,7 @@ void
 radv_describe_begin_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
 {
    uint64_t device_id = (uintptr_t)cmd_buffer->device;
-   struct rgp_sqtt_marker_cb_start marker = {};
+   struct rgp_sqtt_marker_cb_start marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    if (likely(!cmd_buffer->device->thread_trace_bo))
@@ -459,7 +459,7 @@ void
 radv_describe_end_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
 {
    uint64_t device_id = (uintptr_t)cmd_buffer->device;
-   struct rgp_sqtt_marker_cb_end marker = {};
+   struct rgp_sqtt_marker_cb_end marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    if (likely(!cmd_buffer->device->thread_trace_bo))
@@ -511,7 +511,7 @@ radv_describe_end_render_pass_clear(struct radv_cmd_buffer *cmd_buffer)
 void
 radv_describe_barrier_end_delayed(struct radv_cmd_buffer *cmd_buffer)
 {
-   struct rgp_sqtt_marker_barrier_end marker = {};
+   struct rgp_sqtt_marker_barrier_end marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    if (likely(!cmd_buffer->device->thread_trace_bo) ||
@@ -568,7 +568,7 @@ void
 radv_describe_barrier_start(struct radv_cmd_buffer *cmd_buffer,
                             enum rgp_barrier_reason reason)
 {
-   struct rgp_sqtt_marker_barrier_start marker = {};
+   struct rgp_sqtt_marker_barrier_start marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    if (likely(!cmd_buffer->device->thread_trace_bo))
@@ -594,7 +594,7 @@ void
 radv_describe_layout_transition(struct radv_cmd_buffer *cmd_buffer,
                                 const struct radv_barrier_data *barrier)
 {
-   struct rgp_sqtt_marker_layout_transition marker = {};
+   struct rgp_sqtt_marker_layout_transition marker = {0};
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
 
    if (likely(!cmd_buffer->device->thread_trace_bo))
@@ -624,7 +624,7 @@ radv_handle_thread_trace(VkQueue _queue)
    static uint64_t num_frames = 0;
 
    if (thread_trace_enabled) {
-      struct radv_thread_trace thread_trace = {};
+      struct radv_thread_trace thread_trace = {0};
 
       radv_end_thread_trace(queue);
       thread_trace_enabled = false;
diff --git a/src/amd/vulkan/radv_cmd_buffer.c b/src/amd/vulkan/radv_cmd_buffer.c
index 72c2934..c3465d6 100644
--- a/src/amd/vulkan/radv_cmd_buffer.c
+++ b/src/amd/vulkan/radv_cmd_buffer.c
@@ -677,7 +677,7 @@ radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
    struct radv_descriptor_state *descriptors_state =
       radv_get_descriptors_state(cmd_buffer, bind_point);
    struct radv_device *device = cmd_buffer->device;
-   uint32_t data[MAX_SETS * 2] = {};
+   uint32_t data[MAX_SETS * 2] = {0};
    uint64_t va;
    unsigned i;
 
    va = radv_buffer_get_va(device->trace_bo) + 24;
@@ -855,7 +855,7 @@ radv_emit_sample_locations(struct radv_cmd_buffer *cmd_buffer)
       &cmd_buffer->state.dynamic.sample_location;
    uint32_t num_samples = (uint32_t)sample_location->per_pixel;
    struct radeon_cmdbuf *cs = cmd_buffer->cs;
-   uint32_t sample_locs_pixel[4][2] = {};
+   uint32_t sample_locs_pixel[4][2] = {0};
    VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */
    uint32_t max_sample_dist = 0;
    uint64_t centroid_priority;
@@ -5370,7 +5370,7 @@ void radv_CmdDraw(
    uint32_t firstInstance)
 {
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct radv_draw_info info = {};
+   struct radv_draw_info info = {0};
 
    info.count = vertexCount;
    info.instance_count = instanceCount;
@@ -5389,7 +5389,7 @@ void radv_CmdDrawIndexed(
    uint32_t firstInstance)
 {
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct radv_draw_info info = {};
+   struct radv_draw_info info = {0};
 
    info.indexed = true;
    info.count = indexCount;
@@ -5410,7 +5410,7 @@ void radv_CmdDrawIndirect(
 {
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
-   struct radv_draw_info info = {};
+   struct radv_draw_info info = {0};
 
    info.count = drawCount;
    info.indirect = buffer;
@@ -5429,7 +5429,7 @@ void radv_CmdDrawIndexedIndirect(
 {
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
-   struct radv_draw_info info = {};
+   struct radv_draw_info info = {0};
 
    info.indexed = true;
    info.count = drawCount;
@@ -5452,7 +5452,7 @@ void radv_CmdDrawIndirectCount(
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
    RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
-   struct radv_draw_info info = {};
+   struct radv_draw_info info = {0};
 
    info.count = maxDrawCount;
    info.indirect = buffer;
@@ -5476,7 +5476,7 @@ void radv_CmdDrawIndexedIndirectCount(
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
    RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
-   struct radv_draw_info info = {};
+   struct radv_draw_info info = {0};
 
    info.indexed = true;
    info.count = maxDrawCount;
@@ -5725,7 +5725,7 @@ void radv_CmdDispatchBase(
    uint32_t z)
 {
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
-   struct radv_dispatch_info info = {};
+   struct radv_dispatch_info info = {0};
 
    info.blocks[0] = x;
    info.blocks[1] = y;
@@ -5753,7 +5753,7 @@ void radv_CmdDispatchIndirect(
 {
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
-   struct radv_dispatch_info info = {};
+   struct radv_dispatch_info info = {0};
 
    info.indirect = buffer;
    info.indirect_offset = offset;
@@ -5767,7 +5767,7 @@ void radv_unaligned_dispatch(
    uint32_t y,
    uint32_t z)
 {
-   struct radv_dispatch_info info = {};
+   struct radv_dispatch_info info = {0};
 
    info.blocks[0] = x;
    info.blocks[1] = y;
@@ -5825,8 +5825,8 @@ static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
    VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
    struct radv_cmd_state *state = &cmd_buffer->state;
    uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
-   VkClearDepthStencilValue value = {};
-   struct radv_barrier_data barrier = {};
+   VkClearDepthStencilValue value = {0};
+   struct radv_barrier_data barrier = {0};
 
    state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
                         RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
@@ -5891,7 +5891,7 @@ static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
                                   uint32_t value)
 {
    struct radv_cmd_state *state = &cmd_buffer->state;
-   struct radv_barrier_data barrier = {};
+   struct radv_barrier_data barrier = {0};
 
    state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                         RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
@@ -5917,7 +5917,7 @@ void radv_initialize_fmask(struct radv_cmd_buffer *cmd_buffer,
    };
    uint32_t log2_samples = util_logbase2(image->info.samples);
    uint32_t value = fmask_clear_values[log2_samples];
-   struct radv_barrier_data barrier = {};
+   struct radv_barrier_data barrier = {0};
 
    state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                         RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
@@ -5935,7 +5935,7 @@ void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
                          const VkImageSubresourceRange *range, uint32_t value)
 {
    struct radv_cmd_state *state = &cmd_buffer->state;
-   struct radv_barrier_data barrier = {};
+   struct radv_barrier_data barrier = {0};
    unsigned size = 0;
 
    state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
@@ -6030,7 +6030,7 @@ static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
    if (radv_image_has_cmask(image) ||
        radv_dcc_enabled(image, range->baseMipLevel)) {
-      uint32_t color_values[2] = {};
+      uint32_t color_values[2] = {0};
 
      radv_set_color_clear_metadata(cmd_buffer, image, range,
                                    color_values);
   }
@@ -6090,7 +6090,7 @@ static void radv_handle_color_image_transition(struct radv_cmd_buffe
       radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
 
    if (fmask_expand) {
-      struct radv_barrier_data barrier = {};
+      struct radv_barrier_data barrier = {0};
 
       barrier.layout_transitions.fmask_color_expand = 1;
       radv_describe_layout_transition(cmd_buffer, &barrier);
@@ -6236,7 +6236,7 @@ radv_barrier(struct radv_cmd_buffer *cmd_buffer,
       const struct VkSampleLocationsInfoEXT *sample_locs_info =
          vk_find_struct_const(pImageMemoryBarriers[i].pNext, SAMPLE_LOCATIONS_INFO_EXT);
-      struct radv_sample_locations_state sample_locations = {};
+      struct radv_sample_locations_state sample_locations = {0};
 
       if (sample_locs_info) {
          assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT);
@@ -6891,7 +6891,7 @@ void radv_CmdDrawIndirectByteCountEXT(
 {
    RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
    RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
-   struct radv_draw_info info = {};
+   struct radv_draw_info info = {0};
 
    info.instance_count = instanceCount;
    info.first_instance = firstInstance;
diff --git a/src/amd/vulkan/radv_debug.c b/src/amd/vulkan/radv_debug.c
index ea53acd..8cdbc88 100644
--- a/src/amd/vulkan/radv_debug.c
+++ b/src/amd/vulkan/radv_debug.c
@@ -534,7 +534,7 @@ static void
 radv_dump_device_name(struct radv_device *device, FILE *f)
 {
    struct radeon_info *info = &device->physical_device->rad_info;
-   char kernel_version[128] = {};
+   char kernel_version[128] = {0};
    struct utsname uname_data;
    const char *chip_name;
diff --git a/src/amd/vulkan/radv_meta_clear.c b/src/amd/vulkan/radv_meta_clear.c
index 7001cd1..4484895 100644
--- a/src/amd/vulkan/radv_meta_clear.c
+++ b/src/amd/vulkan/radv_meta_clear.c
@@ -1599,7 +1599,7 @@ static void vi_get_fast_clear_parameters(struct radv_device *device,
                                          uint32_t* reset_value,
                                          bool *can_avoid_fast_clear_elim)
 {
-   bool values[4] = {};
+   bool values[4] = {0};
    int extra_channel;
    bool main_value = false;
    bool extra_value = false;
diff --git a/src/amd/vulkan/radv_meta_decompress.c b/src/amd/vulkan/radv_meta_decompress.c
index 5ab7e4f..1de57bf 100644
--- a/src/amd/vulkan/radv_meta_decompress.c
+++ b/src/amd/vulkan/radv_meta_decompress.c
@@ -548,7 +548,7 @@ void radv_decompress_depth_stencil(struct radv_cmd_buffer *cmd_buffer,
                                    const VkImageSubresourceRange *subresourceRange,
                                    struct radv_sample_locations_state *sample_locs)
 {
-   struct radv_barrier_data barrier = {};
+   struct radv_barrier_data barrier = {0};
 
    barrier.layout_transitions.depth_stencil_expand = 1;
    radv_describe_layout_transition(cmd_buffer, &barrier);
@@ -563,7 +563,7 @@ void radv_resummarize_depth_stencil(struct radv_cmd_buffer *cmd_buffer,
                                     const VkImageSubresourceRange *subresourceRange,
                                     struct radv_sample_locations_state *sample_locs)
 {
-   struct radv_barrier_data barrier = {};
+   struct radv_barrier_data barrier = {0};
 
    barrier.layout_transitions.depth_stencil_resummarize = 1;
    radv_describe_layout_transition(cmd_buffer, &barrier);
diff --git a/src/amd/vulkan/radv_meta_fast_clear.c b/src/amd/vulkan/radv_meta_fast_clear.c
index 228df00..d60e862 100644
--- a/src/amd/vulkan/radv_meta_fast_clear.c
+++ b/src/amd/vulkan/radv_meta_fast_clear.c
@@ -779,7 +779,7 @@ radv_fast_clear_flush_image_inplace(struct radv_cmd_buffer *cmd_buffer,
                                     struct radv_image *image,
                                     const VkImageSubresourceRange *subresourceRange)
 {
-   struct radv_barrier_data barrier = {};
+   struct radv_barrier_data barrier = {0};
 
    if (radv_image_has_fmask(image)) {
       barrier.layout_transitions.fmask_decompress = 1;
@@ -933,7 +933,7 @@ radv_decompress_dcc(struct radv_cmd_buffer *cmd_buffer,
                     struct radv_image *image,
                     const VkImageSubresourceRange *subresourceRange)
 {
-   struct radv_barrier_data barrier = {};
+   struct radv_barrier_data barrier = {0};
 
    barrier.layout_transitions.dcc_decompress = 1;
    radv_describe_layout_transition(cmd_buffer, &barrier);
diff --git a/src/amd/vulkan/radv_meta_resolve.c b/src/amd/vulkan/radv_meta_resolve.c
index 027eee5..809522b 100644
--- a/src/amd/vulkan/radv_meta_resolve.c
+++ b/src/amd/vulkan/radv_meta_resolve.c
@@ -903,7 +903,7 @@ radv_decompress_resolve_subpass_src(struct radv_cmd_buffer *cmd_buffer)
       struct radv_image_view *src_iview =
          cmd_buffer->state.attachments[src_att.attachment].iview;
       struct radv_image *src_image = src_iview->image;
 
-      VkImageResolve2KHR region = {};
+      VkImageResolve2KHR region = {0};
       region.sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR;
       region.srcSubresource.aspectMask = src_iview->aspect_mask;
       region.srcSubresource.mipLevel = 0;
@@ -919,7 +919,7 @@ radv_decompress_resolve_subpass_src(struct radv_cmd_buffer *cmd_buffer)
       struct radv_image_view *src_iview = fb->attachments[src_att.attachment];
       struct radv_image *src_image = src_iview->image;
 
-      VkImageResolve2KHR region = {};
+      VkImageResolve2KHR region = {0};
       region.sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR;
       region.srcSubresource.aspectMask = src_iview->aspect_mask;
       region.srcSubresource.mipLevel = 0;
@@ -958,7 +958,7 @@ radv_decompress_resolve_src(struct radv_cmd_buffer *cmd_buffer,
       radv_meta_get_iview_layer(src_image, &region->srcSubresource,
                                 &region->srcOffset);
 
-   VkImageMemoryBarrier barrier = {};
+   VkImageMemoryBarrier barrier = {0};
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
    barrier.oldLayout = src_image_layout;
diff --git a/src/amd/vulkan/radv_meta_resolve_cs.c b/src/amd/vulkan/radv_meta_resolve_cs.c
index a5b5c32..117b408 100644
--- a/src/amd/vulkan/radv_meta_resolve_cs.c
+++ b/src/amd/vulkan/radv_meta_resolve_cs.c
@@ -1039,7 +1039,7 @@ radv_depth_stencil_resolve_subpass_cs(struct radv_cmd_buffer *cmd_buffer,
 
    if (radv_image_has_htile(dst_image)) {
      if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
-        VkImageSubresourceRange range = {};
+        VkImageSubresourceRange range = {0};
         range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
         range.baseMipLevel = dst_iview->base_mip;
         range.levelCount = 1;
diff --git a/src/amd/vulkan/radv_nir_to_llvm.c b/src/amd/vulkan/radv_nir_to_llvm.c
index 952e73f..793381b 100644
--- a/src/amd/vulkan/radv_nir_to_llvm.c
+++ b/src/amd/vulkan/radv_nir_to_llvm.c
@@ -1659,8 +1659,8 @@ radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
       /* Load the descriptor and compute the write offset for each
        * enabled buffer.
        */
-      LLVMValueRef so_write_offset[4] = {};
-      LLVMValueRef so_buffers[4] = {};
+      LLVMValueRef so_write_offset[4] = {0};
+      LLVMValueRef so_buffers[4] = {0};
       LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->args->streamout_buffers);
 
       for (i = 0; i < 4; i++) {
@@ -1690,7 +1690,7 @@ radv_emit_streamout(struct radv_shader_context *ctx, unsigned stream)
       /* Write streamout data. */
       for (i = 0; i < ctx->args->shader_info->so.num_outputs; i++) {
-         struct radv_shader_output_values shader_out = {};
+         struct radv_shader_output_values shader_out = {0};
          struct radv_stream_output *output =
             &ctx->args->shader_info->so.outputs[i];
 
@@ -1754,7 +1754,7 @@ radv_llvm_export_vs(struct radv_shader_context *ctx,
                     bool export_clip_dists)
 {
    LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_value = NULL;
-   struct ac_export_args pos_args[4] = {};
+   struct ac_export_args pos_args[4] = {0};
    unsigned pos_idx, index;
    int i;
@@ -2223,7 +2223,7 @@ static void build_streamout_vertex(struct radv_shader_context *ctx,
 {
    struct radv_streamout_info *so = &ctx->args->shader_info->so;
    LLVMBuilderRef builder = ctx->ac.builder;
-   LLVMValueRef offset[4] = {};
+   LLVMValueRef offset[4] = {0};
    LLVMValueRef tmp;
 
    for (unsigned buffer = 0; buffer < 4; ++buffer) {
@@ -2279,7 +2279,7 @@ static void build_streamout_vertex(struct radv_shader_context *ctx,
             output->stream != stream)
            continue;
 
-         struct radv_shader_output_values out = {};
+         struct radv_shader_output_values out = {0};
 
         for (unsigned j = 0; j < 4; j++) {
            out.values[j] = outputs[i].values[j];
@@ -2295,7 +2295,7 @@ static void build_streamout_vertex(struct radv_shader_context *ctx,
         if (stream != output->stream)
            continue;
 
-         struct radv_shader_output_values out = {};
+         struct radv_shader_output_values out = {0};
 
         for (unsigned comp = 0; comp < 4; comp++) {
            if (!(output->component_mask & (1 << comp)))
@@ -2342,13 +2342,13 @@ static void build_streamout(struct radv_shader_context *ctx,
    LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
    LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
    LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
-   LLVMValueRef so_buffer[4] = {};
+   LLVMValueRef so_buffer[4] = {0};
    unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
                                    (nggso->vertices[2] ? 1 : 0);
-   LLVMValueRef prim_stride_dw[4] = {};
+   LLVMValueRef prim_stride_dw[4] = {0};
    LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
    int stream_for_buffer[4] = { -1, -1, -1, -1 };
-   unsigned bufmask_for_stream[4] = {};
+   unsigned bufmask_for_stream[4] = {0};
    bool isgs = ctx->stage == MESA_SHADER_GEOMETRY;
    unsigned scratch_emit_base = isgs ? 4 : 0;
    LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
@@ -2463,7 +2463,7 @@ static void build_streamout(struct radv_shader_context *ctx,
        * because LLVM can't generate divide-by-multiply if we try to do this
        * via VALU with one lane per buffer.
        */
-      LLVMValueRef max_emit[4] = {};
+      LLVMValueRef max_emit[4] = {0};
       for (unsigned buffer = 0; buffer < 4; ++buffer) {
          if (stream_for_buffer[buffer] == -1)
            continue;
@@ -2560,7 +2560,7 @@ static void build_streamout(struct radv_shader_context *ctx,
    /* Determine the workgroup-relative per-thread / primitive offset into
     * the streamout buffers */
-   struct ac_wg_scan primemit_scan[4] = {};
+   struct ac_wg_scan primemit_scan[4] = {0};
 
    if (isgs) {
       for (unsigned stream = 0; stream < 4; ++stream) {
@@ -2583,7 +2583,7 @@ static void build_streamout(struct radv_shader_context *ctx,
    ac_build_s_barrier(&ctx->ac);
 
    /* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
-   LLVMValueRef wgoffset_dw[4] = {};
+   LLVMValueRef wgoffset_dw[4] = {0};
 
    {
       LLVMValueRef scratch_vgpr;
@@ -2754,7 +2754,7 @@ handle_ngg_outputs_post_2(struct radv_shader_context *ctx)
 
    /* Streamout */
    if (ctx->args->shader_info->so.num_outputs) {
-      struct ngg_streamout nggso = {};
+      struct ngg_streamout nggso = {0};
 
       nggso.num_vertices = num_vertices_val;
      nggso.prim_enable[0] = is_gs_thread;
@@ -2805,7 +2805,7 @@ handle_ngg_outputs_post_2(struct radv_shader_context *ctx)
     */
    ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
    {
-      struct ac_ngg_prim prim = {};
+      struct ac_ngg_prim prim = {0};
 
       if (ctx->args->options->key.vs_common_out.as_ngg_passthrough) {
          prim.passthrough = ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[0]);
@@ -2977,7 +2977,7 @@ static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
 
    /* Streamout */
    if (ctx->args->shader_info->so.num_outputs) {
-      struct ngg_streamout nggso = {};
+      struct ngg_streamout nggso = {0};
 
       nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);
 
@@ -3067,7 +3067,7 @@ static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
    /* Inclusive scan addition across the current wave. */
    LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
-   struct ac_wg_scan vertlive_scan = {};
+   struct ac_wg_scan vertlive_scan = {0};
 
    vertlive_scan.op = nir_op_iadd;
    vertlive_scan.enable_reduce = true;
    vertlive_scan.enable_exclusive = true;
@@ -3119,7 +3119,7 @@ static void gfx10_ngg_gs_emit_epilogue_2(struct radv_shader_context *ctx)
    ac_build_ifcc(&ctx->ac, tmp, 5140);
    {
       LLVMValueRef flags;
-      struct ac_ngg_prim prim = {};
+      struct ac_ngg_prim prim = {0};
 
       prim.num_vertices = verts_per_prim;
       tmp = ngg_gs_vertex_ptr(ctx, tid);
diff --git a/src/amd/vulkan/radv_pipeline.c b/src/amd/vulkan/radv_pipeline.c
index 70ad869..4d31513 100644
--- a/src/amd/vulkan/radv_pipeline.c
+++ b/src/amd/vulkan/radv_pipeline.c
@@ -452,7 +452,7 @@ static unsigned radv_choose_spi_color_format(VkFormat vk_format,
                                              bool blend_need_alpha)
 {
    const struct vk_format_description *desc = vk_format_description(vk_format);
-   struct ac_spi_color_formats formats = {};
+   struct ac_spi_color_formats formats = {0};
    unsigned format, ntype, swap;
 
    format = radv_translate_colorformat(vk_format);
@@ -3035,8 +3035,8 @@ VkResult radv_create_shaders(struct radv_pipeline *pipeline,
       struct radv_shader_binary *gs_copy_binary = NULL;
 
       if (!pipeline->gs_copy_shader && !radv_pipeline_has_ngg(pipeline)) {
-         struct radv_shader_info info = {};
-         struct radv_shader_variant_key key = {};
+         struct radv_shader_info info = {0};
+         struct radv_shader_variant_key key = {0};
 
          key.has_multiview_view_index =
             keys[MESA_SHADER_GEOMETRY].has_multiview_view_index;
diff --git a/src/amd/vulkan/radv_rgp.c b/src/amd/vulkan/radv_rgp.c
index 77b542c..d9f4158 100644
--- a/src/amd/vulkan/radv_rgp.c
+++ b/src/amd/vulkan/radv_rgp.c
@@ -586,10 +586,10 @@ radv_sqtt_dump_data(struct radv_device *device,
                     const struct radv_thread_trace *thread_trace,
                     FILE *output)
 {
-   struct sqtt_file_chunk_asic_info asic_info = {};
-   struct sqtt_file_chunk_cpu_info cpu_info = {};
-   struct sqtt_file_chunk_api_info api_info = {};
-   struct sqtt_file_header header = {};
+   struct sqtt_file_chunk_asic_info asic_info = {0};
+   struct sqtt_file_chunk_cpu_info cpu_info = {0};
+   struct sqtt_file_chunk_api_info api_info = {0};
+   struct sqtt_file_header header = {0};
    size_t file_offset = 0;
 
    /* SQTT header file. */
@@ -616,8 +616,8 @@ radv_sqtt_dump_data(struct radv_device *device,
    for (unsigned i = 0; i < thread_trace->num_traces; i++) {
       const struct radv_thread_trace_se *se = &thread_trace->traces[i];
       const struct radv_thread_trace_info *info = &se->info;
-      struct sqtt_file_chunk_sqtt_desc desc = {};
-      struct sqtt_file_chunk_sqtt_data data = {};
+      struct sqtt_file_chunk_sqtt_desc desc = {0};
+      struct sqtt_file_chunk_sqtt_data data = {0};
       uint64_t size = info->cur_offset * 32; /* unit of 32 bytes */
 
       /* SQTT desc chunk. */
diff --git a/src/amd/vulkan/radv_shader.c b/src/amd/vulkan/radv_shader.c
index 5740ba7..7ac93f1 100644
--- a/src/amd/vulkan/radv_shader.c
+++ b/src/amd/vulkan/radv_shader.c
@@ -1279,7 +1279,7 @@ shader_variant_compile(struct radv_device *device,
    options->debug.func = radv_compiler_debug;
    options->debug.private_data = &debug_data;
 
-   struct radv_shader_args args = {};
+   struct radv_shader_args args = {0};
    args.options = options;
    args.shader_info = info;
    args.is_gs_copy_shader = gs_copy_shader;
@@ -1563,7 +1563,7 @@ radv_GetShaderInfoAMD(VkDevice _device,
      unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
      struct ac_shader_config *conf = &variant->config;
 
-     VkShaderStatisticsInfoAMD statistics = {};
+     VkShaderStatisticsInfoAMD statistics = {0};
      statistics.shaderStageMask = shaderStage;
      statistics.numPhysicalVgprs = device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd;
      statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
@@ -1647,7 +1647,7 @@ radv_dump_shader_stats(struct radv_device *device,
    uint32_t prop_count = 0;
    VkResult result;
 
-   VkPipelineInfoKHR pipeline_info = {};
+   VkPipelineInfoKHR pipeline_info = {0};
    pipeline_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR;
    pipeline_info.pipeline = radv_pipeline_to_handle(pipeline);
 
@@ -1675,7 +1675,7 @@ radv_dump_shader_stats(struct radv_device *device,
       uint32_t stat_count = 0;
       VkResult result;
 
-      VkPipelineExecutableInfoKHR exec_info = {};
+      VkPipelineExecutableInfoKHR exec_info = {0};
       exec_info.pipeline = radv_pipeline_to_handle(pipeline);
       exec_info.executableIndex = i;
 
diff --git a/src/amd/vulkan/radv_sqtt.c b/src/amd/vulkan/radv_sqtt.c
index 345637c..57f8856 100644
--- a/src/amd/vulkan/radv_sqtt.c
+++ b/src/amd/vulkan/radv_sqtt.c
@@ -614,7 +614,7 @@ radv_get_thread_trace(struct radv_queue *queue,
       void *data_ptr = thread_trace_ptr + data_offset;
       struct radv_thread_trace_info *info =
          (struct radv_thread_trace_info *)info_ptr;
-      struct radv_thread_trace_se thread_trace_se = {};
+      struct radv_thread_trace_se thread_trace_se = {0};
 
       if (!radv_is_thread_trace_complete(device, info)) {
          uint32_t expected_size =
diff --git a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
index 7df4375..c66bf3e 100644
--- a/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
+++ b/src/amd/vulkan/winsys/amdgpu/radv_amdgpu_cs.c
@@ -979,7 +979,7 @@ radv_amdgpu_winsys_cs_submit_fallback(struct radeon_winsys_ctx *_ctx,
    struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
    struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
    struct drm_amdgpu_bo_list_entry *handles = NULL;
-   struct radv_amdgpu_cs_request request = {};
+   struct radv_amdgpu_cs_request request = {0};
    struct amdgpu_cs_ib_info *ibs;
    struct radv_amdgpu_cs *cs0;
    struct radv_amdgpu_winsys *aws;
-- 
2.7.4
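
For reference, the snippet below is an illustration only and is not part of the
patch; the struct name example_args is a hypothetical stand-in for the Mesa types
touched above. It sketches why "{}" had to become "{0}": empty initializer braces
are a GNU/C++ extension that MSVC's C compiler rejects, while "{0}" is standard C
and zero-initializes every member of the aggregate, not just the first one.

    /* Illustration only -- hypothetical struct, not a Mesa type. */
    #include <stdio.h>

    struct example_args {
       int   opcode;
       float data[4];
       void *resource;
    };

    int main(void)
    {
       /* struct example_args a = {};     empty braces are not valid C here and
        *                                 are rejected by MSVC's C front end,
        *                                 which is what this patch works around */
       struct example_args a = {0};    /* standard C: the first member gets 0 and
                                        * all remaining members are implicitly
                                        * zero-initialized */

       printf("%d %f %p\n", a.opcode, (double)a.data[3], a.resource);
       return 0;
    }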