rq_variable *stack;
uint32_t shared_base;
uint32_t stack_entries;
+
+ nir_intrinsic_instr *initialize;
};
/* Builds the concatenation "<base_name><name>" in a freshly ralloc'd buffer
 * owned by `ctx` (freed when `ctx` is freed — callers must not free it).
 * Allocation size strlen(base_name) + strlen(name) + 1 exactly fits the
 * result including the NUL terminator.
 * NOTE(review): function-like macro evaluates both `name` and `base_name`
 * twice — only pass side-effect-free expressions (literals / plain vars).
 * Assumes `ctx` and `base_name` are in scope at every expansion site. */
#define VAR_NAME(name) strcat(strcpy(ralloc_size(ctx, strlen(base_name) + strlen(name) + 1), base_name), name)
rq_store_var(b, index, vars->trav.top_stack, nir_imm_int(b, -1), 1);
rq_store_var(b, index, vars->incomplete, nir_imm_bool(b, !(instance->debug_flags & RADV_DEBUG_NO_RT)), 0x1);
+
+ vars->initialize = instr;
}
static nir_def *
}
static nir_def *
-lower_rq_proceed(nir_builder *b, nir_def *index, struct ray_query_vars *vars, struct radv_device *device)
+lower_rq_proceed(nir_builder *b, nir_def *index, nir_intrinsic_instr *instr, struct ray_query_vars *vars,
+ struct radv_device *device)
{
+ nir_metadata_require(nir_cf_node_get_function(&instr->instr.block->cf_node), nir_metadata_dominance);
+
+ bool ignore_cull_mask = false;
+ if (nir_block_dominates(vars->initialize->instr.block, instr->instr.block)) {
+ nir_src cull_mask = vars->initialize->src[3];
+ if (nir_src_is_const(cull_mask) && nir_src_as_uint(cull_mask) == 0xFF)
+ ignore_cull_mask = true;
+ }
+
nir_variable *inv_dir = nir_local_variable_create(b->impl, glsl_vector_type(GLSL_TYPE_FLOAT, 3), "inv_dir");
nir_store_var(b, inv_dir, nir_frcp(b, rq_load_var(b, index, vars->trav.direction)), 0x7);
.dir = rq_load_var(b, index, vars->direction),
.vars = trav_vars,
.stack_entries = vars->stack_entries,
+ .ignore_cull_mask = ignore_cull_mask,
.stack_store_cb = store_stack_entry,
.stack_load_cb = load_stack_entry,
.aabb_cb = handle_candidate_aabb,
new_dest = lower_rq_load(&builder, index, intrinsic, vars);
break;
case nir_intrinsic_rq_proceed:
- new_dest = lower_rq_proceed(&builder, index, vars, device);
+ new_dest = lower_rq_proceed(&builder, index, intrinsic, vars, device);
break;
case nir_intrinsic_rq_terminate:
lower_rq_terminate(&builder, index, intrinsic, vars);
nir_store_deref(b, args->vars.sbt_offset_and_flags, nir_channel(b, instance_data, 3), 1);
- nir_def *instance_and_mask = nir_channel(b, instance_data, 2);
- nir_push_if(b, nir_ult(b, nir_iand(b, instance_and_mask, args->cull_mask), nir_imm_int(b, 1 << 24)));
- {
- nir_jump(b, nir_jump_continue);
+ if (!args->ignore_cull_mask) {
+ nir_def *instance_and_mask = nir_channel(b, instance_data, 2);
+ nir_push_if(b, nir_ult(b, nir_iand(b, instance_and_mask, args->cull_mask), nir_imm_int(b, 1 << 24)));
+ {
+ nir_jump(b, nir_jump_continue);
+ }
+ nir_pop_if(b, NULL);
}
- nir_pop_if(b, NULL);
nir_store_deref(b, args->vars.top_stack, nir_load_deref(b, args->vars.stack), 1);
nir_store_deref(b, args->vars.bvh_base, nir_pack_64_2x32(b, nir_trim_vector(b, instance_data, 2)), 1);