!gfx10_ngg_writes_user_edgeflags(shader);
}
-void gfx10_ngg_export_vertex(struct ac_shader_abi *abi)
-{
- struct si_shader_context *ctx = si_shader_context_from_abi(abi);
- struct si_shader_info *info = &ctx->shader->selector->info;
- struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
- LLVMValueRef *addrs = ctx->abi.outputs;
-
- unsigned num_outputs = info->num_outputs;
- /* if needed, nir ngg lower will append primitive id export at last */
- if (ctx->shader->key.ge.mono.u.vs_export_prim_id)
- num_outputs++;
-
- for (unsigned i = 0; i < num_outputs; i++) {
- if (i < info->num_outputs) {
- outputs[i].semantic = info->output_semantic[i];
- outputs[i].vertex_streams = info->output_streams[i];
- } else {
- outputs[i].semantic = VARYING_SLOT_PRIMITIVE_ID;
- outputs[i].vertex_streams = 0;
- }
-
- for (unsigned j = 0; j < 4; j++)
- outputs[i].values[j] =
- LLVMBuildLoad2(ctx->ac.builder, ctx->ac.f32, addrs[4 * i + j], "");
- }
-
- si_llvm_build_vs_exports(ctx, outputs, num_outputs);
-}
-
static void clamp_gsprims_to_esverts(unsigned *max_gsprims, unsigned max_esverts,
unsigned min_verts_per_prim, bool use_adjacency)
{
LLVMValueRef gfx10_get_thread_id_in_tg(struct si_shader_context *ctx);
unsigned gfx10_ngg_get_vertices_per_prim(struct si_shader *shader);
bool gfx10_ngg_export_prim_early(struct si_shader *shader);
-void gfx10_ngg_export_vertex(struct ac_shader_abi *abi);
unsigned gfx10_ngg_get_scratch_dw_size(struct si_shader *shader);
bool gfx10_ngg_calculate_subgroup_info(struct si_shader *shader);
return index;
}
+/* ABI callback: collect all vertex-stage outputs from the ABI output
+ * storage and emit the hardware vertex exports via
+ * si_llvm_build_vs_exports().
+ *
+ * abi: shader ABI handle; the enclosing si_shader_context is recovered
+ * from it. No return value; emits LLVM IR as a side effect.
+ */
+static void si_llvm_export_vertex(struct ac_shader_abi *abi)
+{
+ struct si_shader_context *ctx = si_shader_context_from_abi(abi);
+ struct si_shader_info *info = &ctx->shader->selector->info;
+ struct si_shader_output_values outputs[PIPE_MAX_SHADER_OUTPUTS];
+ LLVMValueRef *addrs = ctx->abi.outputs;
+
+ unsigned num_outputs = info->num_outputs;
+ /* If needed, the NIR lowering pass will have appended the primitive id
+ * export as the last output slot, so account for one extra output. */
+ if (ctx->shader->key.ge.mono.u.vs_export_prim_id)
+ num_outputs++;
+
+ for (unsigned i = 0; i < num_outputs; i++) {
+ if (i < info->num_outputs) {
+ outputs[i].semantic = info->output_semantic[i];
+ outputs[i].vertex_streams = info->output_streams[i];
+ } else {
+ /* The appended slot is the primitive id export; stream 0. */
+ outputs[i].semantic = VARYING_SLOT_PRIMITIVE_ID;
+ outputs[i].vertex_streams = 0;
+ }
+
+ /* Each output occupies 4 consecutive f32 slots in ctx->abi.outputs,
+ * hence the addrs[4 * i + j] addressing. */
+ for (unsigned j = 0; j < 4; j++)
+ outputs[i].values[j] =
+ LLVMBuildLoad2(ctx->ac.builder, ctx->ac.f32, addrs[4 * i + j], "");
+ }
+
+ si_llvm_build_vs_exports(ctx, outputs, num_outputs);
+}
+
bool si_llvm_translate_nir(struct si_shader_context *ctx, struct si_shader *shader,
struct nir_shader *nir, bool free_nir)
{
ctx->num_images = info->base.num_images;
ctx->abi.intrinsic_load = si_llvm_load_intrinsic;
- ctx->abi.export_vertex = gfx10_ngg_export_vertex;
+ ctx->abi.export_vertex = si_llvm_export_vertex;
ctx->abi.load_sampler_desc = si_llvm_load_sampler_desc;
si_llvm_create_main_func(ctx);