From: Jason Ekstrand
Date: Wed, 20 Jan 2016 02:58:31 +0000 (-0800)
Subject: nir/spirv: Move OpPhi handling to vtn_cfg.c
X-Git-Tag: upstream/17.1.0~11012^2~616
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=c7203aa621ee5cbc4a10fd5ae9a3d10dd38b8a98;p=platform%2Fupstream%2Fmesa.git

nir/spirv: Move OpPhi handling to vtn_cfg.c

Phi handling is intrinsically tied to the CFG.  Moving it here makes it
a bit easier to handle.  In particular, we can now do SSA repair after
we've done the phi-node second pass.  This fixes 6 CTS tests.
---
diff --git a/src/glsl/nir/spirv/spirv_to_nir.c b/src/glsl/nir/spirv/spirv_to_nir.c
index a117175..56fcd93 100644
--- a/src/glsl/nir/spirv/spirv_to_nir.c
+++ b/src/glsl/nir/spirv/spirv_to_nir.c
@@ -148,10 +148,6 @@ vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
    return val;
 }
 
-static struct vtn_ssa_value *
-vtn_variable_load(struct vtn_builder *b, nir_deref_var *src,
-                  struct vtn_type *src_type);
-
 struct vtn_ssa_value *
 vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
 {
@@ -1751,7 +1747,7 @@ variable_is_external_block(nir_variable *var)
            var->data.mode == nir_var_shader_storage);
 }
 
-static struct vtn_ssa_value *
+struct vtn_ssa_value *
 vtn_variable_load(struct vtn_builder *b, nir_deref_var *src,
                   struct vtn_type *src_type)
 {
@@ -3193,61 +3189,6 @@ vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
    nir_builder_instr_insert(&b->nb, &intrin->instr);
 }
 
-static void
-vtn_handle_phi_first_pass(struct vtn_builder *b, const uint32_t *w)
-{
-   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
-    * For each phi, we create a variable with the appropreate type and do a
-    * load from that variable. Then, in a second pass, we add stores to
-    * that variable to each of the predecessor blocks.
-    *
-    * We could do something more intelligent here. However, in order to
-    * handle loops and things properly, we really need dominance
-    * information. It would end up basically being the into-SSA algorithm
-    * all over again. It's easier if we just let lower_vars_to_ssa do that
-    * for us instead of repeating it here.
-    */
-   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
-
-   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
-   nir_variable *phi_var =
-      nir_local_variable_create(b->nb.impl, type->type, "phi");
-   _mesa_hash_table_insert(b->phi_table, w, phi_var);
-
-   val->ssa = vtn_variable_load(b, nir_deref_var_create(b, phi_var), type);
-}
-
-static bool
-vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
-                           const uint32_t *w, unsigned count)
-{
-   if (opcode == SpvOpLabel) {
-      b->block = vtn_value(b, w[1], vtn_value_type_block)->block;
-      return true;
-   }
-
-   if (opcode != SpvOpPhi)
-      return true;
-
-   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
-   assert(phi_entry);
-   nir_variable *phi_var = phi_entry->data;
-
-   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
-
-   for (unsigned i = 3; i < count; i += 2) {
-      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);
-      struct vtn_block *pred =
-         vtn_value(b, w[i + 1], vtn_value_type_block)->block;
-
-      b->nb.cursor = nir_after_block_before_jump(pred->end_block);
-
-      vtn_variable_store(b, src, nir_deref_var_create(b, phi_var), type);
-   }
-
-   return true;
-}
-
 static unsigned
 gl_primitive_from_spv_execution_mode(SpvExecutionMode mode)
 {
@@ -3775,10 +3716,6 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
       vtn_handle_composite(b, opcode, w, count);
       break;
 
-   case SpvOpPhi:
-      vtn_handle_phi_first_pass(b, w);
-      break;
-
   case SpvOpEmitVertex:
   case SpvOpEndPrimitive:
   case SpvOpEmitStreamVertex:
@@ -3851,11 +3788,8 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
       b->impl = func->impl;
       b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                                _mesa_key_pointer_equal);
-      b->phi_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
-                                             _mesa_key_pointer_equal);
+
       vtn_function_emit(b, func, vtn_handle_body_instruction);
-      vtn_foreach_instruction(b, func->start_block->label, func->end,
-                              vtn_handle_phi_second_pass);
    }
 
    assert(b->entry_point->value_type == vtn_value_type_function);
diff --git a/src/glsl/nir/spirv/vtn_cfg.c b/src/glsl/nir/spirv/vtn_cfg.c
index 9c2e271..0d3702c 100644
--- a/src/glsl/nir/spirv/vtn_cfg.c
+++ b/src/glsl/nir/spirv/vtn_cfg.c
@@ -451,6 +451,66 @@ vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
    }
 }
 
+static bool
+vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
+                           const uint32_t *w, unsigned count)
+{
+   if (opcode == SpvOpLabel)
+      return true; /* Nothing to do */
+
+   /* If this isn't a phi node, stop. */
+   if (opcode != SpvOpPhi)
+      return false;
+
+   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
+    * For each phi, we create a variable with the appropreate type and
+    * do a load from that variable. Then, in a second pass, we add
+    * stores to that variable to each of the predecessor blocks.
+    *
+    * We could do something more intelligent here. However, in order to
+    * handle loops and things properly, we really need dominance
+    * information. It would end up basically being the into-SSA
+    * algorithm all over again. It's easier if we just let
+    * lower_vars_to_ssa do that for us instead of repeating it here.
+    */
+   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+   nir_variable *phi_var =
+      nir_local_variable_create(b->nb.impl, type->type, "phi");
+   _mesa_hash_table_insert(b->phi_table, w, phi_var);
+
+   val->ssa = vtn_variable_load(b, nir_deref_var_create(b, phi_var), type);
+
+   return true;
+}
+
+static bool
+vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
+                           const uint32_t *w, unsigned count)
+{
+   if (opcode != SpvOpPhi)
+      return true;
+
+   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
+   assert(phi_entry);
+   nir_variable *phi_var = phi_entry->data;
+
+   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
+
+   for (unsigned i = 3; i < count; i += 2) {
+      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);
+      struct vtn_block *pred =
+         vtn_value(b, w[i + 1], vtn_value_type_block)->block;
+
+      b->nb.cursor = nir_after_block_before_jump(pred->end_block);
+
+      vtn_variable_store(b, src, nir_deref_var_create(b, phi_var), type);
+   }
+
+   return true;
+}
+
 static void
 vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                 nir_variable *switch_fall_var, bool *has_switch_break)
@@ -492,9 +552,14 @@ vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
      case vtn_cf_node_type_block: {
         struct vtn_block *block = (struct vtn_block *)node;
 
-         vtn_foreach_instruction(b, block->label,
-                                 block->merge ? block->merge : block->branch,
-                                 handler);
+         const uint32_t *block_start = block->label;
+         const uint32_t *block_end = block->merge ? block->merge :
+                                                    block->branch;
+
+         block_start = vtn_foreach_instruction(b, block_start, block_end,
+                                               vtn_handle_phis_first_pass);
+
+         vtn_foreach_instruction(b, block_start, block_end, handler);
 
         block->end_block = nir_cursor_current_block(b->nb.cursor);
 
@@ -682,9 +747,14 @@ vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
    nir_builder_init(&b->nb, func->impl);
    b->nb.cursor = nir_after_cf_list(&func->impl->body);
    b->has_loop_continue = false;
+   b->phi_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
+                                          _mesa_key_pointer_equal);
 
    vtn_emit_cf_list(b, &func->body, NULL, NULL, instruction_handler);
 
+   vtn_foreach_instruction(b, func->start_block->label, func->end,
+                           vtn_handle_phi_second_pass);
+
    /* Continue blocks for loops get inserted before the body of the loop
     * but instructions in the continue may use SSA defs in the loop body.
     * Therefore, we need to repair SSA to insert the needed phi nodes.
diff --git a/src/glsl/nir/spirv/vtn_private.h b/src/glsl/nir/spirv/vtn_private.h
index a0cf1b9..9a066d6 100644
--- a/src/glsl/nir/spirv/vtn_private.h
+++ b/src/glsl/nir/spirv/vtn_private.h
@@ -383,6 +383,10 @@ struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
 struct vtn_ssa_value *vtn_ssa_transpose(struct vtn_builder *b,
                                         struct vtn_ssa_value *src);
 
+struct vtn_ssa_value *
+vtn_variable_load(struct vtn_builder *b, nir_deref_var *src,
+                  struct vtn_type *src_type);
+
 void vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                         nir_deref_var *dest, struct vtn_type *dest_type);
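
The comment in vtn_handle_phis_first_pass describes the whole trick: rather
than building NIR phi instructions directly, each OpPhi gets a local "phi"
variable, the phi itself becomes a load of that variable, and the second pass
stores each predecessor's incoming value into the variable at the end of the
predecessor block; lower_vars_to_ssa later turns the variable back into proper
SSA. A rough stand-alone illustration of that lowering, in plain C rather than
NIR, with every name below invented for the example:

/* Illustrative sketch only -- plain C, not Mesa/NIR code; all names here
 * are invented. It mirrors what the two phi passes build: a value such as
 *
 *    %x = OpPhi %int %a %then_label %b %else_label
 *
 * becomes a load from a temporary "phi" variable, and each predecessor
 * block stores its incoming value into that temporary just before its
 * branch. lower_vars_to_ssa later rebuilds real SSA (and real phis).
 */
#include <stdio.h>

static int
merge_with_lowered_phi(int cond, int a, int b)
{
   int phi_tmp; /* analogue of nir_local_variable_create(..., "phi") */

   if (cond) {
      phi_tmp = a;   /* store added at the end of predecessor 1 */
   } else {
      phi_tmp = b;   /* store added at the end of predecessor 2 */
   }

   /* merge block: the OpPhi itself has become a load of the temporary */
   int x = phi_tmp;
   return x;
}

int
main(void)
{
   printf("%d %d\n", merge_with_lowered_phi(1, 10, 20),
          merge_with_lowered_phi(0, 10, 20));
   return 0;
}

The rationale given in the comment is that doing this properly in one pass
would need dominance information, which is exactly what the existing
vars-to-SSA lowering already computes, so the translator deliberately leaves
the real into-SSA work to it.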
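
The other piece the move relies on is that vtn_foreach_instruction returns the
position at which its callback stopped: vtn_handle_phis_first_pass keeps
returning true for the leading OpLabel/OpPhi run and false at the first other
opcode, so vtn_emit_cf_list can hand the remainder of the block to the normal
handler. A minimal sketch of that "consume a prefix, then resume" pattern,
again with invented names and plain integers standing in for SPIR-V words:

/* Hypothetical stand-alone sketch -- not the Mesa API; the types, names,
 * and opcodes are invented for the example. */
#include <stdbool.h>
#include <stdio.h>

typedef bool (*handler_fn)(int opcode);

/* Walk [start, end), calling handler until it declines an opcode;
 * return a pointer to the first opcode the handler did not consume. */
static const int *
foreach_until_declined(const int *start, const int *end, handler_fn handler)
{
   const int *w = start;
   while (w < end && handler(*w))
      w++;
   return w;
}

enum { OP_LABEL = 1, OP_PHI = 2, OP_ADD = 3, OP_BRANCH = 4 };

/* First pass: skip the label, consume phis, stop at anything else. */
static bool
first_pass(int opcode)
{
   if (opcode == OP_LABEL)
      return true;
   return opcode == OP_PHI;
}

/* Main pass: stands in for the normal per-instruction handler. */
static bool
main_pass(int opcode)
{
   printf("emit opcode %d\n", opcode);
   return true;
}

int
main(void)
{
   const int block[] = { OP_LABEL, OP_PHI, OP_PHI, OP_ADD, OP_BRANCH };
   const int *block_end = block + 5;

   /* The first pass eats the label and the phis... */
   const int *block_start = foreach_until_declined(block, block_end, first_pass);

   /* ...and the main handler resumes at OP_ADD. */
   foreach_until_declined(block_start, block_end, main_pass);
   return 0;
}

In the real code the second pass then runs once over the whole function from
vtn_function_emit, after vtn_emit_cf_list has recorded every block's end_block
and before the existing SSA repair, which is what the commit message means by
doing SSA repair after the phi-node second pass.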