Replace empty-brace initializers `= {}` with `= {0}`. The empty initializer list is a GNU/C++ extension (standardized only in C23) and is rejected by MSVC when compiling C; `= {0}` zero-initializes the whole aggregate portably. Necessary to compile with MSVC.
Reviewed-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7123>
unsigned trace_id_count, enum chip_class chip_class,
ac_debug_addr_callback addr_callback, void *addr_callback_data)
{
- struct ac_ib_parser ib = {};
+ struct ac_ib_parser ib = {0};
ib.ib = ib_ptr;
ib.num_dw = num_dw;
ib.trace_ids = trace_ids;
bool ac_query_gpu_info(int fd, void *dev_p, struct radeon_info *info,
struct amdgpu_gpu_info *amdinfo)
{
- struct drm_amdgpu_info_device device_info = {};
- struct amdgpu_buffer_size_alignments alignment_info = {};
- struct drm_amdgpu_info_hw_ip dma = {}, compute = {}, uvd = {};
- struct drm_amdgpu_info_hw_ip uvd_enc = {}, vce = {}, vcn_dec = {}, vcn_jpeg = {};
- struct drm_amdgpu_info_hw_ip vcn_enc = {}, gfx = {};
- struct amdgpu_gds_resource_info gds = {};
+ struct drm_amdgpu_info_device device_info = {0};
+ struct amdgpu_buffer_size_alignments alignment_info = {0};
+ struct drm_amdgpu_info_hw_ip dma = {0}, compute = {0}, uvd = {0};
+ struct drm_amdgpu_info_hw_ip uvd_enc = {0}, vce = {0}, vcn_dec = {0}, vcn_jpeg = {0};
+ struct drm_amdgpu_info_hw_ip vcn_enc = {0}, gfx = {0};
+ struct amdgpu_gds_resource_info gds = {0};
uint32_t vce_version = 0, vce_feature = 0, uvd_version = 0, uvd_feature = 0;
int r, i, j;
amdgpu_device_handle dev = dev_p;
}
if (info->drm_minor >= 9) {
- struct drm_amdgpu_memory_info meminfo = {};
+ struct drm_amdgpu_memory_info meminfo = {0};
r = amdgpu_query_info(dev, AMDGPU_INFO_MEMORY, sizeof(meminfo), &meminfo);
if (r) {
size_t num_symbols = symbols_data->d_size / sizeof(Elf64_Sym);
for (size_t j = 0; j < num_symbols; ++j, ++symbol) {
- struct ac_rtld_symbol s = {};
+ struct ac_rtld_symbol s = {0};
if (ELF64_ST_TYPE(symbol->st_info) == STT_AMDGPU_LDS) {
/* old-style LDS symbols from initial prototype -- remove eventually */
return false;
/* TODO: be precise about scratch use? */
- struct ac_shader_config c = {};
+ struct ac_shader_config c = {0};
ac_parse_shader_binary_config(config_data, config_nbytes, binary->wave_size, true, info, &c);
config->num_sgprs = MAX2(config->num_sgprs, c.num_sgprs);
if (!indices)
return NULL;
- ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {};
+ ADDR2_COMPUTE_DCC_ADDRFROMCOORD_OUTPUT addrout = {0};
addrout.size = sizeof(addrout);
for (unsigned y = 0; y < h; ++y) {
!(surf->flags & RADEON_SURF_NO_FMASK)) {
ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
- ADDR_TILEINFO fmask_tile_info = {};
+ ADDR_TILEINFO fmask_tile_info = {0};
fin.size = sizeof(fin);
fout.size = sizeof(fout);
const struct ac_surf_config *config, struct radeon_surf *surf,
bool compressed, ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
{
- ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {};
+ ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {0};
ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
ADDR_E_RETURNCODE ret;
!in->flags.metaPipeUnaligned))) {
ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
- ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {};
+ ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
struct ac_vs_exp_inst *exp)
{
unsigned i, default_val; /* SPI_PS_INPUT_CNTL_i.DEFAULT_VAL */
- bool is_zero[4] = {}, is_one[4] = {};
+ bool is_zero[4] = {0}, is_one[4] = {0};
for (i = 0; i < 4; i++) {
/* It's a constant expression. Undef outputs are eliminated too. */
void ac_apply_fmask_to_sample(struct ac_llvm_context *ac, LLVMValueRef fmask, LLVMValueRef *addr,
bool is_array_tex)
{
- struct ac_image_args fmask_load = {};
+ struct ac_image_args fmask_load = {0};
fmask_load.opcode = ac_image_load;
fmask_load.resource = fmask;
fmask_load.dmask = 0xf;
ac_build_sendmsg(ctx, AC_SENDMSG_GS_ALLOC_REQ, tmp);
if (export_dummy_prim) {
- struct ac_ngg_prim prim = {};
+ struct ac_ngg_prim prim = {0};
/* The vertex indices are 0,0,0. */
prim.passthrough = ctx->i32_0;
- struct ac_export_args pos = {};
+ struct ac_export_args pos = {0};
pos.out[0] = pos.out[1] = pos.out[2] = pos.out[3] = ctx->f32_0;
pos.target = V_008DFC_SQ_EXP_POS;
pos.enabled_channels = 0xf;
assert(!wa_8888);
half_texel[0] = half_texel[1] = LLVMConstReal(ctx->f32, -0.5);
} else {
- struct ac_image_args resinfo = {};
+ struct ac_image_args resinfo = {0};
LLVMBasicBlockRef bbs[2];
LLVMValueRef unnorm = NULL;
struct waterfall_context wctx;
LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
- struct ac_image_args args = {};
+ struct ac_image_args args = {0};
args.cache_policy = get_cache_policy(ctx, access, false, false);
LLVMValueRef dynamic_index = enter_waterfall_image(ctx, &wctx, instr);
bool writeonly_memory = access & ACCESS_NON_READABLE;
- struct ac_image_args args = {};
+ struct ac_image_args args = {0};
args.cache_policy = get_cache_policy(ctx, access, true, writeonly_memory);
assert(length < sizeof(intrinsic_name));
result = ac_build_intrinsic(&ctx->ac, intrinsic_name, ctx->ac.i32, params, param_count, 0);
} else {
- struct ac_image_args args = {};
+ struct ac_image_args args = {0};
args.opcode = cmpswap ? ac_image_atomic_cmpswap : ac_image_atomic;
args.atomic = atomic_subop;
args.data[0] = params[0];
void ac_nir_translate(struct ac_llvm_context *ac, struct ac_shader_abi *abi,
const struct ac_shader_args *args, struct nir_shader *nir)
{
- struct ac_nir_context ctx = {};
+ struct ac_nir_context ctx = {0};
struct nir_function *func;
ctx.ac = *ac;
radv_write_begin_general_api_marker(struct radv_cmd_buffer *cmd_buffer,
enum rgp_sqtt_marker_general_api_type api_type)
{
- struct rgp_sqtt_marker_general_api marker = {};
+ struct rgp_sqtt_marker_general_api marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_GENERAL_API;
radv_write_end_general_api_marker(struct radv_cmd_buffer *cmd_buffer,
enum rgp_sqtt_marker_general_api_type api_type)
{
- struct rgp_sqtt_marker_general_api marker = {};
+ struct rgp_sqtt_marker_general_api marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_GENERAL_API;
uint32_t instance_offset_user_data,
uint32_t draw_index_user_data)
{
- struct rgp_sqtt_marker_event marker = {};
+ struct rgp_sqtt_marker_event marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
marker.identifier = RGP_SQTT_MARKER_IDENTIFIER_EVENT;
enum rgp_sqtt_marker_event_type api_type,
uint32_t x, uint32_t y, uint32_t z)
{
- struct rgp_sqtt_marker_event_with_dims marker = {};
+ struct rgp_sqtt_marker_event_with_dims marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
marker.event.identifier = RGP_SQTT_MARKER_IDENTIFIER_EVENT;
radv_describe_begin_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
uint64_t device_id = (uintptr_t)cmd_buffer->device;
- struct rgp_sqtt_marker_cb_start marker = {};
+ struct rgp_sqtt_marker_cb_start marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (likely(!cmd_buffer->device->thread_trace_bo))
radv_describe_end_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
uint64_t device_id = (uintptr_t)cmd_buffer->device;
- struct rgp_sqtt_marker_cb_end marker = {};
+ struct rgp_sqtt_marker_cb_end marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (likely(!cmd_buffer->device->thread_trace_bo))
void
radv_describe_barrier_end_delayed(struct radv_cmd_buffer *cmd_buffer)
{
- struct rgp_sqtt_marker_barrier_end marker = {};
+ struct rgp_sqtt_marker_barrier_end marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (likely(!cmd_buffer->device->thread_trace_bo) ||
radv_describe_barrier_start(struct radv_cmd_buffer *cmd_buffer,
enum rgp_barrier_reason reason)
{
- struct rgp_sqtt_marker_barrier_start marker = {};
+ struct rgp_sqtt_marker_barrier_start marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (likely(!cmd_buffer->device->thread_trace_bo))
radv_describe_layout_transition(struct radv_cmd_buffer *cmd_buffer,
const struct radv_barrier_data *barrier)
{
- struct rgp_sqtt_marker_layout_transition marker = {};
+ struct rgp_sqtt_marker_layout_transition marker = {0};
struct radeon_cmdbuf *cs = cmd_buffer->cs;
if (likely(!cmd_buffer->device->thread_trace_bo))
static uint64_t num_frames = 0;
if (thread_trace_enabled) {
- struct radv_thread_trace thread_trace = {};
+ struct radv_thread_trace thread_trace = {0};
radv_end_thread_trace(queue);
thread_trace_enabled = false;
struct radv_descriptor_state *descriptors_state =
radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_device *device = cmd_buffer->device;
- uint32_t data[MAX_SETS * 2] = {};
+ uint32_t data[MAX_SETS * 2] = {0};
uint64_t va;
unsigned i;
va = radv_buffer_get_va(device->trace_bo) + 24;
&cmd_buffer->state.dynamic.sample_location;
uint32_t num_samples = (uint32_t)sample_location->per_pixel;
struct radeon_cmdbuf *cs = cmd_buffer->cs;
- uint32_t sample_locs_pixel[4][2] = {};
+ uint32_t sample_locs_pixel[4][2] = {0};
VkOffset2D sample_locs[4][8]; /* 8 is the max. sample count supported */
uint32_t max_sample_dist = 0;
uint64_t centroid_priority;
uint32_t firstInstance)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radv_draw_info info = {};
+ struct radv_draw_info info = {0};
info.count = vertexCount;
info.instance_count = instanceCount;
uint32_t firstInstance)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radv_draw_info info = {};
+ struct radv_draw_info info = {0};
info.indexed = true;
info.count = indexCount;
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- struct radv_draw_info info = {};
+ struct radv_draw_info info = {0};
info.count = drawCount;
info.indirect = buffer;
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- struct radv_draw_info info = {};
+ struct radv_draw_info info = {0};
info.indexed = true;
info.count = drawCount;
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
- struct radv_draw_info info = {};
+ struct radv_draw_info info = {0};
info.count = maxDrawCount;
info.indirect = buffer;
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
- struct radv_draw_info info = {};
+ struct radv_draw_info info = {0};
info.indexed = true;
info.count = maxDrawCount;
uint32_t z)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radv_dispatch_info info = {};
+ struct radv_dispatch_info info = {0};
info.blocks[0] = x;
info.blocks[1] = y;
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
- struct radv_dispatch_info info = {};
+ struct radv_dispatch_info info = {0};
info.indirect = buffer;
info.indirect_offset = offset;
uint32_t y,
uint32_t z)
{
- struct radv_dispatch_info info = {};
+ struct radv_dispatch_info info = {0};
info.blocks[0] = x;
info.blocks[1] = y;
VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
struct radv_cmd_state *state = &cmd_buffer->state;
uint32_t htile_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
- VkClearDepthStencilValue value = {};
- struct radv_barrier_data barrier = {};
+ VkClearDepthStencilValue value = {0};
+ struct radv_barrier_data barrier = {0};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
- struct radv_barrier_data barrier = {};
+ struct radv_barrier_data barrier = {0};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
};
uint32_t log2_samples = util_logbase2(image->info.samples);
uint32_t value = fmask_clear_values[log2_samples];
- struct radv_barrier_data barrier = {};
+ struct radv_barrier_data barrier = {0};
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
const VkImageSubresourceRange *range, uint32_t value)
{
struct radv_cmd_state *state = &cmd_buffer->state;
- struct radv_barrier_data barrier = {};
+ struct radv_barrier_data barrier = {0};
unsigned size = 0;
state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
if (radv_image_has_cmask(image) ||
radv_dcc_enabled(image, range->baseMipLevel)) {
- uint32_t color_values[2] = {};
+ uint32_t color_values[2] = {0};
radv_set_color_clear_metadata(cmd_buffer, image, range,
color_values);
}
radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
if (fmask_expand) {
- struct radv_barrier_data barrier = {};
+ struct radv_barrier_data barrier = {0};
barrier.layout_transitions.fmask_color_expand = 1;
radv_describe_layout_transition(cmd_buffer, &barrier);
const struct VkSampleLocationsInfoEXT *sample_locs_info =
vk_find_struct_const(pImageMemoryBarriers[i].pNext,
SAMPLE_LOCATIONS_INFO_EXT);
- struct radv_sample_locations_state sample_locations = {};
+ struct radv_sample_locations_state sample_locations = {0};
if (sample_locs_info) {
assert(image->flags & VK_IMAGE_CREATE_SAMPLE_LOCATIONS_COMPATIBLE_DEPTH_BIT_EXT);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_buffer, counterBuffer, _counterBuffer);
- struct radv_draw_info info = {};
+ struct radv_draw_info info = {0};
info.instance_count = instanceCount;
info.first_instance = firstInstance;
radv_dump_device_name(struct radv_device *device, FILE *f)
{
struct radeon_info *info = &device->physical_device->rad_info;
- char kernel_version[128] = {};
+ char kernel_version[128] = {0};
struct utsname uname_data;
const char *chip_name;
uint32_t* reset_value,
bool *can_avoid_fast_clear_elim)
{
- bool values[4] = {};
+ bool values[4] = {0};
int extra_channel;
bool main_value = false;
bool extra_value = false;
const VkImageSubresourceRange *subresourceRange,
struct radv_sample_locations_state *sample_locs)
{
- struct radv_barrier_data barrier = {};
+ struct radv_barrier_data barrier = {0};
barrier.layout_transitions.depth_stencil_expand = 1;
radv_describe_layout_transition(cmd_buffer, &barrier);
const VkImageSubresourceRange *subresourceRange,
struct radv_sample_locations_state *sample_locs)
{
- struct radv_barrier_data barrier = {};
+ struct radv_barrier_data barrier = {0};
barrier.layout_transitions.depth_stencil_resummarize = 1;
radv_describe_layout_transition(cmd_buffer, &barrier);
struct radv_image *image,
const VkImageSubresourceRange *subresourceRange)
{
- struct radv_barrier_data barrier = {};
+ struct radv_barrier_data barrier = {0};
if (radv_image_has_fmask(image)) {
barrier.layout_transitions.fmask_decompress = 1;
struct radv_image *image,
const VkImageSubresourceRange *subresourceRange)
{
- struct radv_barrier_data barrier = {};
+ struct radv_barrier_data barrier = {0};
barrier.layout_transitions.dcc_decompress = 1;
radv_describe_layout_transition(cmd_buffer, &barrier);
struct radv_image_view *src_iview = cmd_buffer->state.attachments[src_att.attachment].iview;
struct radv_image *src_image = src_iview->image;
- VkImageResolve2KHR region = {};
+ VkImageResolve2KHR region = {0};
region.sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR;
region.srcSubresource.aspectMask = src_iview->aspect_mask;
region.srcSubresource.mipLevel = 0;
struct radv_image_view *src_iview = fb->attachments[src_att.attachment];
struct radv_image *src_image = src_iview->image;
- VkImageResolve2KHR region = {};
+ VkImageResolve2KHR region = {0};
region.sType = VK_STRUCTURE_TYPE_IMAGE_RESOLVE_2_KHR;
region.srcSubresource.aspectMask = src_iview->aspect_mask;
region.srcSubresource.mipLevel = 0;
radv_meta_get_iview_layer(src_image, ®ion->srcSubresource,
®ion->srcOffset);
- VkImageMemoryBarrier barrier = {};
+ VkImageMemoryBarrier barrier = {0};
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
barrier.oldLayout = src_image_layout;
if (radv_image_has_htile(dst_image)) {
if (aspects == VK_IMAGE_ASPECT_DEPTH_BIT) {
- VkImageSubresourceRange range = {};
+ VkImageSubresourceRange range = {0};
range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
range.baseMipLevel = dst_iview->base_mip;
range.levelCount = 1;
/* Load the descriptor and compute the write offset for each
* enabled buffer.
*/
- LLVMValueRef so_write_offset[4] = {};
- LLVMValueRef so_buffers[4] = {};
+ LLVMValueRef so_write_offset[4] = {0};
+ LLVMValueRef so_buffers[4] = {0};
LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->args->streamout_buffers);
for (i = 0; i < 4; i++) {
/* Write streamout data. */
for (i = 0; i < ctx->args->shader_info->so.num_outputs; i++) {
- struct radv_shader_output_values shader_out = {};
+ struct radv_shader_output_values shader_out = {0};
struct radv_stream_output *output =
&ctx->args->shader_info->so.outputs[i];
bool export_clip_dists)
{
LLVMValueRef psize_value = NULL, layer_value = NULL, viewport_value = NULL;
- struct ac_export_args pos_args[4] = {};
+ struct ac_export_args pos_args[4] = {0};
unsigned pos_idx, index;
int i;
{
struct radv_streamout_info *so = &ctx->args->shader_info->so;
LLVMBuilderRef builder = ctx->ac.builder;
- LLVMValueRef offset[4] = {};
+ LLVMValueRef offset[4] = {0};
LLVMValueRef tmp;
for (unsigned buffer = 0; buffer < 4; ++buffer) {
output->stream != stream)
continue;
- struct radv_shader_output_values out = {};
+ struct radv_shader_output_values out = {0};
for (unsigned j = 0; j < 4; j++) {
out.values[j] = outputs[i].values[j];
if (stream != output->stream)
continue;
- struct radv_shader_output_values out = {};
+ struct radv_shader_output_values out = {0};
for (unsigned comp = 0; comp < 4; comp++) {
if (!(output->component_mask & (1 << comp)))
LLVMValueRef i32_2 = LLVMConstInt(ctx->ac.i32, 2, false);
LLVMValueRef i32_4 = LLVMConstInt(ctx->ac.i32, 4, false);
LLVMValueRef i32_8 = LLVMConstInt(ctx->ac.i32, 8, false);
- LLVMValueRef so_buffer[4] = {};
+ LLVMValueRef so_buffer[4] = {0};
unsigned max_num_vertices = 1 + (nggso->vertices[1] ? 1 : 0) +
(nggso->vertices[2] ? 1 : 0);
- LLVMValueRef prim_stride_dw[4] = {};
+ LLVMValueRef prim_stride_dw[4] = {0};
LLVMValueRef prim_stride_dw_vgpr = LLVMGetUndef(ctx->ac.i32);
int stream_for_buffer[4] = { -1, -1, -1, -1 };
- unsigned bufmask_for_stream[4] = {};
+ unsigned bufmask_for_stream[4] = {0};
bool isgs = ctx->stage == MESA_SHADER_GEOMETRY;
unsigned scratch_emit_base = isgs ? 4 : 0;
LLVMValueRef scratch_emit_basev = isgs ? i32_4 : ctx->ac.i32_0;
* because LLVM can't generate divide-by-multiply if we try to do this
* via VALU with one lane per buffer.
*/
- LLVMValueRef max_emit[4] = {};
+ LLVMValueRef max_emit[4] = {0};
for (unsigned buffer = 0; buffer < 4; ++buffer) {
if (stream_for_buffer[buffer] == -1)
continue;
/* Determine the workgroup-relative per-thread / primitive offset into
* the streamout buffers */
- struct ac_wg_scan primemit_scan[4] = {};
+ struct ac_wg_scan primemit_scan[4] = {0};
if (isgs) {
for (unsigned stream = 0; stream < 4; ++stream) {
ac_build_s_barrier(&ctx->ac);
/* Fetch the per-buffer offsets and per-stream emit counts in all waves. */
- LLVMValueRef wgoffset_dw[4] = {};
+ LLVMValueRef wgoffset_dw[4] = {0};
{
LLVMValueRef scratch_vgpr;
/* Streamout */
if (ctx->args->shader_info->so.num_outputs) {
- struct ngg_streamout nggso = {};
+ struct ngg_streamout nggso = {0};
nggso.num_vertices = num_vertices_val;
nggso.prim_enable[0] = is_gs_thread;
*/
ac_build_ifcc(&ctx->ac, is_gs_thread, 6001);
{
- struct ac_ngg_prim prim = {};
+ struct ac_ngg_prim prim = {0};
if (ctx->args->options->key.vs_common_out.as_ngg_passthrough) {
prim.passthrough = ac_get_arg(&ctx->ac, ctx->args->gs_vtx_offset[0]);
/* Streamout */
if (ctx->args->shader_info->so.num_outputs) {
- struct ngg_streamout nggso = {};
+ struct ngg_streamout nggso = {0};
nggso.num_vertices = LLVMConstInt(ctx->ac.i32, verts_per_prim, false);
/* Inclusive scan addition across the current wave. */
LLVMValueRef vertlive = LLVMBuildLoad(builder, vertliveptr, "");
- struct ac_wg_scan vertlive_scan = {};
+ struct ac_wg_scan vertlive_scan = {0};
vertlive_scan.op = nir_op_iadd;
vertlive_scan.enable_reduce = true;
vertlive_scan.enable_exclusive = true;
ac_build_ifcc(&ctx->ac, tmp, 5140);
{
LLVMValueRef flags;
- struct ac_ngg_prim prim = {};
+ struct ac_ngg_prim prim = {0};
prim.num_vertices = verts_per_prim;
tmp = ngg_gs_vertex_ptr(ctx, tid);
bool blend_need_alpha)
{
const struct vk_format_description *desc = vk_format_description(vk_format);
- struct ac_spi_color_formats formats = {};
+ struct ac_spi_color_formats formats = {0};
unsigned format, ntype, swap;
format = radv_translate_colorformat(vk_format);
struct radv_shader_binary *gs_copy_binary = NULL;
if (!pipeline->gs_copy_shader &&
!radv_pipeline_has_ngg(pipeline)) {
- struct radv_shader_info info = {};
- struct radv_shader_variant_key key = {};
+ struct radv_shader_info info = {0};
+ struct radv_shader_variant_key key = {0};
key.has_multiview_view_index =
keys[MESA_SHADER_GEOMETRY].has_multiview_view_index;
const struct radv_thread_trace *thread_trace,
FILE *output)
{
- struct sqtt_file_chunk_asic_info asic_info = {};
- struct sqtt_file_chunk_cpu_info cpu_info = {};
- struct sqtt_file_chunk_api_info api_info = {};
- struct sqtt_file_header header = {};
+ struct sqtt_file_chunk_asic_info asic_info = {0};
+ struct sqtt_file_chunk_cpu_info cpu_info = {0};
+ struct sqtt_file_chunk_api_info api_info = {0};
+ struct sqtt_file_header header = {0};
size_t file_offset = 0;
/* SQTT header file. */
for (unsigned i = 0; i < thread_trace->num_traces; i++) {
const struct radv_thread_trace_se *se = &thread_trace->traces[i];
const struct radv_thread_trace_info *info = &se->info;
- struct sqtt_file_chunk_sqtt_desc desc = {};
- struct sqtt_file_chunk_sqtt_data data = {};
+ struct sqtt_file_chunk_sqtt_desc desc = {0};
+ struct sqtt_file_chunk_sqtt_data data = {0};
uint64_t size = info->cur_offset * 32; /* unit of 32 bytes */
/* SQTT desc chunk. */
options->debug.func = radv_compiler_debug;
options->debug.private_data = &debug_data;
- struct radv_shader_args args = {};
+ struct radv_shader_args args = {0};
args.options = options;
args.shader_info = info;
args.is_gs_copy_shader = gs_copy_shader;
unsigned lds_multiplier = device->physical_device->rad_info.chip_class >= GFX7 ? 512 : 256;
struct ac_shader_config *conf = &variant->config;
- VkShaderStatisticsInfoAMD statistics = {};
+ VkShaderStatisticsInfoAMD statistics = {0};
statistics.shaderStageMask = shaderStage;
statistics.numPhysicalVgprs = device->physical_device->rad_info.num_physical_wave64_vgprs_per_simd;
statistics.numPhysicalSgprs = device->physical_device->rad_info.num_physical_sgprs_per_simd;
uint32_t prop_count = 0;
VkResult result;
- VkPipelineInfoKHR pipeline_info = {};
+ VkPipelineInfoKHR pipeline_info = {0};
pipeline_info.sType = VK_STRUCTURE_TYPE_PIPELINE_INFO_KHR;
pipeline_info.pipeline = radv_pipeline_to_handle(pipeline);
uint32_t stat_count = 0;
VkResult result;
- VkPipelineExecutableInfoKHR exec_info = {};
+ VkPipelineExecutableInfoKHR exec_info = {0};
exec_info.pipeline = radv_pipeline_to_handle(pipeline);
exec_info.executableIndex = i;
void *data_ptr = thread_trace_ptr + data_offset;
struct radv_thread_trace_info *info =
(struct radv_thread_trace_info *)info_ptr;
- struct radv_thread_trace_se thread_trace_se = {};
+ struct radv_thread_trace_se thread_trace_se = {0};
if (!radv_is_thread_trace_complete(device, info)) {
uint32_t expected_size =
struct radv_amdgpu_ctx *ctx = radv_amdgpu_ctx(_ctx);
struct radv_amdgpu_fence *fence = (struct radv_amdgpu_fence *)_fence;
struct drm_amdgpu_bo_list_entry *handles = NULL;
- struct radv_amdgpu_cs_request request = {};
+ struct radv_amdgpu_cs_request request = {0};
struct amdgpu_cs_ib_info *ibs;
struct radv_amdgpu_cs *cs0;
struct radv_amdgpu_winsys *aws;