fd2_draw_vbo(struct fd_context *ctx, const struct pipe_draw_info *pinfo,
unsigned index_offset)
{
- if (!ctx->prog.fp || !ctx->prog.vp)
+ if (!ctx->prog.fs || !ctx->prog.vs)
return false;
if (ctx->dirty & FD_DIRTY_VTXBUF)
if (dirty & (FD_DIRTY_PROG | FD_DIRTY_CONST)) {
emit_constants(ring, VS_CONST_BASE * 4,
&ctx->constbuf[PIPE_SHADER_VERTEX],
- (dirty & FD_DIRTY_PROG) ? ctx->prog.vp : NULL);
+ (dirty & FD_DIRTY_PROG) ? ctx->prog.vs : NULL);
}
if (dirty & FD_DIRTY_VIEWPORT) {
{
struct fd2_blend_stateobj *blend = fd2_blend_stateobj(ctx->blend);
struct fd2_zsa_stateobj *zsa = fd2_zsa_stateobj(ctx->zsa);
- struct fd2_shader_stateobj *fp = ctx->prog.fp;
+ struct fd2_shader_stateobj *fs = ctx->prog.fs;
struct fd_ringbuffer *ring = ctx->batch->draw;
/* NOTE: we probably want to eventually refactor this so each state
struct pipe_stencil_ref *sr = &ctx->stencil_ref;
uint32_t val = zsa->rb_depthcontrol;
- if (fp->has_kill)
+ if (fs->has_kill)
val &= ~A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE;
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
if (dirty & (FD_DIRTY_PROG | FD_DIRTY_CONST)) {
emit_constants(ring, VS_CONST_BASE * 4,
&ctx->constbuf[PIPE_SHADER_VERTEX],
- (dirty & FD_DIRTY_PROG) ? ctx->prog.vp : NULL);
+ (dirty & FD_DIRTY_PROG) ? ctx->prog.vs : NULL);
emit_constants(ring, PS_CONST_BASE * 4,
&ctx->constbuf[PIPE_SHADER_FRAGMENT],
- (dirty & FD_DIRTY_PROG) ? ctx->prog.fp : NULL);
+ (dirty & FD_DIRTY_PROG) ? ctx->prog.fs : NULL);
}
if (dirty & (FD_DIRTY_BLEND | FD_DIRTY_ZSA)) {
bool binning = (ctx->batch && ring == ctx->batch->binning);
unsigned variant = 0;
- vp = prog->vp;
+ vp = prog->vs;
/* find variant matching the linked fragment shader */
if (!binning) {
- fp = prog->fp;
+ fp = prog->fs;
for (variant = 1; variant < ARRAY_SIZE(vp->variant); variant++) {
/* if checked all variants, compile a new variant */
if (!vp->variant[variant].info.sizedwords) {
/* XXX maybe it's possible to reuse patch_vtx_fetch somehow? */
prog = &ctx->solid_prog;
- so = prog->vp;
- ir2_compile(prog->vp, 1, prog->fp);
+ so = prog->vs;
+ ir2_compile(prog->vs, 1, prog->fs);
#define IR2_FETCH_SWIZ_XY01 0xb08
#define IR2_FETCH_SWIZ_XYZ1 0xa88
instr->dst_swiz = IR2_FETCH_SWIZ_XYZ1;
prog = &ctx->blit_prog[0];
- so = prog->vp;
- ir2_compile(prog->vp, 1, prog->fp);
+ so = prog->vs;
+ ir2_compile(prog->vs, 1, prog->fs);
info = &so->variant[1].info;
.sprite_coord_mode = ctx->rasterizer->sprite_coord_mode,
};
- if (fd3_needs_manual_clipping(ctx->prog.vp, ctx->rasterizer))
+ if (fd3_needs_manual_clipping(ctx->prog.vs, ctx->rasterizer))
emit.key.ucp_enables = ctx->rasterizer->clip_plane_enable;
fixup_shader_state(ctx, &emit.key);
/* and now binning pass: */
emit.binning_pass = true;
emit.dirty = dirty & ~(FD_DIRTY_BLEND);
- emit.vp = NULL; /* we changed key so need to refetch vp */
- emit.fp = NULL;
+ emit.vs = NULL; /* we changed key so need to refetch vs */
+ emit.fs = NULL;
draw_impl(ctx, ctx->batch->binning, &emit, index_offset);
fd_context_all_clean(ctx);
bool rasterflat;
/* cached to avoid repeated lookups of same variants: */
- const struct ir3_shader_variant *vp, *fp;
+ const struct ir3_shader_variant *vs, *fs;
};
static inline const struct ir3_shader_variant *
fd3_emit_get_vp(struct fd3_emit *emit)
{
- if (!emit->vp) {
- struct ir3_shader *shader = emit->prog->vp;
- emit->vp = ir3_shader_variant(shader, emit->key,
+ if (!emit->vs) {
+ struct ir3_shader *shader = emit->prog->vs;
+ emit->vs = ir3_shader_variant(shader, emit->key,
emit->binning_pass, emit->debug);
}
- return emit->vp;
+ return emit->vs;
}
static inline const struct ir3_shader_variant *
fd3_emit_get_fp(struct fd3_emit *emit)
{
- if (!emit->fp) {
+ if (!emit->fs) {
if (emit->binning_pass) {
/* use dummy stateobj to simplify binning vs non-binning: */
- static const struct ir3_shader_variant binning_fp = {};
- emit->fp = &binning_fp;
+ static const struct ir3_shader_variant binning_fs = {};
+ emit->fs = &binning_fs;
} else {
- struct ir3_shader *shader = emit->prog->fp;
- emit->fp = ir3_shader_variant(shader, emit->key,
+ struct ir3_shader *shader = emit->prog->fs;
+ emit->fs = ir3_shader_variant(shader, emit->key,
false, emit->debug);
}
}
- return emit->fp;
+ return emit->fs;
}
void fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit);
if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR)) {
emit.prog = &ctx->blit_prog[pfb->nr_cbufs - 1];
- emit.fp = NULL; /* frag shader changed so clear cache */
+ emit.fs = NULL; /* frag shader changed so clear cache */
fd3_program_emit(ring, &emit, pfb->nr_cbufs, pfb->cbufs);
emit_mem2gmem_surf(batch, gmem->cbuf_base, pfb->cbufs, pfb->nr_cbufs, bin_w);
}
emit.prog = &ctx->blit_zs;
emit.key.half_precision = false;
}
- emit.fp = NULL; /* frag shader changed so clear cache */
+ emit.fs = NULL; /* frag shader changed so clear cache */
fd3_program_emit(ring, &emit, 1, &pfb->zsbuf);
emit_mem2gmem_surf(batch, gmem->zsbuf_base, &pfb->zsbuf, 1, bin_w);
}
/* and now binning pass: */
emit.binning_pass = true;
emit.dirty = dirty & ~(FD_DIRTY_BLEND);
- emit.vp = NULL; /* we changed key so need to refetch vp */
- emit.fp = NULL;
+ emit.vs = NULL; /* we changed key so need to refetch vs */
+ emit.fs = NULL;
draw_impl(ctx, ctx->batch->binning, &emit, index_offset);
fd_context_all_clean(ctx);
bool no_decode_srgb;
/* cached to avoid repeated lookups of same variants: */
- const struct ir3_shader_variant *vp, *fp;
+ const struct ir3_shader_variant *vs, *fs;
/* TODO: other shader stages.. */
};
static inline const struct ir3_shader_variant *
fd4_emit_get_vp(struct fd4_emit *emit)
{
- if (!emit->vp) {
- struct ir3_shader *shader = emit->prog->vp;
- emit->vp = ir3_shader_variant(shader, emit->key,
+ if (!emit->vs) {
+ struct ir3_shader *shader = emit->prog->vs;
+ emit->vs = ir3_shader_variant(shader, emit->key,
emit->binning_pass, emit->debug);
}
- return emit->vp;
+ return emit->vs;
}
static inline const struct ir3_shader_variant *
fd4_emit_get_fp(struct fd4_emit *emit)
{
- if (!emit->fp) {
+ if (!emit->fs) {
if (emit->binning_pass) {
/* use dummy stateobj to simplify binning vs non-binning: */
- static const struct ir3_shader_variant binning_fp = {};
- emit->fp = &binning_fp;
+ static const struct ir3_shader_variant binning_fs = {};
+ emit->fs = &binning_fs;
} else {
- struct ir3_shader *shader = emit->prog->fp;
- emit->fp = ir3_shader_variant(shader, emit->key,
+ struct ir3_shader *shader = emit->prog->fs;
+ emit->fs = ir3_shader_variant(shader, emit->key,
false, emit->debug);
}
}
- return emit->fp;
+ return emit->fs;
}
void fd4_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd4_emit *emit);
if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR)) {
emit.prog = &ctx->blit_prog[pfb->nr_cbufs - 1];
- emit.fp = NULL; /* frag shader changed so clear cache */
+ emit.fs = NULL; /* frag shader changed so clear cache */
fd4_program_emit(ring, &emit, pfb->nr_cbufs, pfb->cbufs);
emit_mem2gmem_surf(batch, gmem->cbuf_base, pfb->cbufs, pfb->nr_cbufs, bin_w);
}
emit.key.half_precision = true;
break;
}
- emit.fp = NULL; /* frag shader changed so clear cache */
+ emit.fs = NULL; /* frag shader changed so clear cache */
fd4_program_emit(ring, &emit, 1, &pfb->zsbuf);
emit_mem2gmem_surf(batch, gmem->zsbuf_base, &pfb->zsbuf, 1, bin_w);
}
/* and now binning pass: */
emit.binning_pass = true;
emit.dirty = dirty & ~(FD_DIRTY_BLEND);
- emit.vp = NULL; /* we changed key so need to refetch vp */
- emit.fp = NULL;
+ emit.vs = NULL; /* we changed key so need to refetch vs */
+ emit.fs = NULL;
draw_impl(ctx, ctx->batch->binning, &emit, index_offset);
if (emit.streamout_mask) {
bool no_lrz_write;
/* cached to avoid repeated lookups of same variants: */
- const struct ir3_shader_variant *vp, *fp;
+ const struct ir3_shader_variant *vs, *fs;
/* TODO: other shader stages.. */
unsigned streamout_mask;
static inline const struct ir3_shader_variant *
fd5_emit_get_vp(struct fd5_emit *emit)
{
- if (!emit->vp) {
- struct ir3_shader *shader = emit->prog->vp;
- emit->vp = ir3_shader_variant(shader, emit->key,
+ if (!emit->vs) {
+ struct ir3_shader *shader = emit->prog->vs;
+ emit->vs = ir3_shader_variant(shader, emit->key,
emit->binning_pass, emit->debug);
}
- return emit->vp;
+ return emit->vs;
}
static inline const struct ir3_shader_variant *
fd5_emit_get_fp(struct fd5_emit *emit)
{
- if (!emit->fp) {
+ if (!emit->fs) {
if (emit->binning_pass) {
/* use dummy stateobj to simplify binning vs non-binning: */
- static const struct ir3_shader_variant binning_fp = {};
- emit->fp = &binning_fp;
+ static const struct ir3_shader_variant binning_fs = {};
+ emit->fs = &binning_fs;
} else {
- struct ir3_shader *shader = emit->prog->fp;
- emit->fp = ir3_shader_variant(shader, emit->key,
+ struct ir3_shader *shader = emit->prog->fs;
+ emit->fs = ir3_shader_variant(shader, emit->key,
false, emit->debug);
}
}
- return emit->fp;
+ return emit->fs;
}
static inline void
.vtx = &ctx->vtx,
.info = info,
.key = {
- .vs = ctx->prog.vp,
- .fs = ctx->prog.fp,
+ .vs = ctx->prog.vs,
+ .fs = ctx->prog.fs,
.key = {
.color_two_side = ctx->rasterizer->light_twoside,
.vclamp_color = ctx->rasterizer->clamp_vertex_color,
struct fd_context *ctx = emit->ctx;
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
const struct fd6_program_state *prog = fd6_emit_get_prog(emit);
- const struct ir3_shader_variant *vp = emit->vs;
- const struct ir3_shader_variant *fp = emit->fs;
+ const struct ir3_shader_variant *vs = emit->vs;
+ const struct ir3_shader_variant *fs = emit->fs;
const enum fd_dirty_3d_state dirty = emit->dirty;
bool needs_border = false;
* we might at some point decide to do sysmem in some cases when
* blend is enabled:
*/
- if (fp->fb_read)
+ if (fs->fb_read)
ctx->batch->gmem_reason |= FD_GMEM_FB_READ;
if (emit->dirty & (FD_DIRTY_VTXBUF | FD_DIRTY_VTXSTATE)) {
nr = 0;
OUT_PKT4(ring, REG_A6XX_RB_FS_OUTPUT_CNTL0, 2);
- OUT_RING(ring, COND(fp->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
- COND(fp->writes_smask && pfb->samples > 1,
+ OUT_RING(ring, COND(fs->writes_pos, A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z) |
+ COND(fs->writes_smask && pfb->samples > 1,
A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_SAMPMASK));
OUT_RING(ring, A6XX_RB_FS_OUTPUT_CNTL1_MRT(nr));
OUT_RING(ring, A6XX_SP_FS_OUTPUT_CNTL1_MRT(nr));
}
- fd6_emit_consts(emit, vp, PIPE_SHADER_VERTEX, FD6_GROUP_VS_CONST, 0x7);
- fd6_emit_consts(emit, fp, PIPE_SHADER_FRAGMENT, FD6_GROUP_FS_CONST, 0x6);
+ fd6_emit_consts(emit, vs, PIPE_SHADER_VERTEX, FD6_GROUP_VS_CONST, 0x7);
+ fd6_emit_consts(emit, fs, PIPE_SHADER_FRAGMENT, FD6_GROUP_FS_CONST, 0x6);
/* if driver-params are needed, emit each time: */
- if (ir3_needs_vs_driver_params(vp)) {
+ if (ir3_needs_vs_driver_params(vs)) {
struct fd_ringbuffer *dpconstobj = fd_submit_new_ringbuffer(
ctx->batch->submit, IR3_DP_VS_COUNT * 4, FD_RINGBUFFER_STREAMING);
- ir3_emit_vs_driver_params(vp, dpconstobj, ctx, emit->info);
+ ir3_emit_vs_driver_params(vs, dpconstobj, ctx, emit->info);
fd6_emit_take_group(emit, dpconstobj, FD6_GROUP_VS_DRIVER_PARAMS, 0x7);
} else {
fd6_emit_take_group(emit, NULL, FD6_GROUP_VS_DRIVER_PARAMS, 0x7);
}
- struct ir3_stream_output_info *info = &vp->shader->stream_output;
+ struct ir3_stream_output_info *info = &vs->shader->stream_output;
if (info->num_outputs)
fd6_emit_streamout(ring, emit, info);
OUT_RING(ring, A6XX_RB_BLEND_ALPHA_F32(bcolor->color[3]));
}
- needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_VERTEX, vp);
- needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_FRAGMENT, fp);
+ needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_VERTEX, vs);
+ needs_border |= fd6_emit_combined_textures(ring, emit, PIPE_SHADER_FRAGMENT, fs);
if (needs_border)
emit_border_color(ctx, ring);
FD_DIRTY_SHADER_PROG)
if (ctx->dirty_shader[PIPE_SHADER_FRAGMENT] & DIRTY_IBO) {
struct fd_ringbuffer *state =
- fd6_build_ibo_state(ctx, fp, PIPE_SHADER_FRAGMENT);
+ fd6_build_ibo_state(ctx, fs, PIPE_SHADER_FRAGMENT);
struct fd_ringbuffer *obj = fd_submit_new_ringbuffer(
ctx->batch->submit, 0x100, FD_RINGBUFFER_STREAMING);
- const struct ir3_ibo_mapping *mapping = &fp->image_mapping;
+ const struct ir3_ibo_mapping *mapping = &fs->image_mapping;
OUT_PKT7(obj, CP_LOAD_STATE6, 3);
OUT_RING(obj, CP_LOAD_STATE6_0_DST_OFF(0) |
OUT_PKT4(obj, REG_A6XX_SP_IBO_COUNT, 1);
OUT_RING(obj, mapping->num_ibo);
- ir3_emit_ssbo_sizes(ctx->screen, fp, obj,
+ ir3_emit_ssbo_sizes(ctx->screen, fs, obj,
&ctx->shaderbuf[PIPE_SHADER_FRAGMENT]);
- ir3_emit_image_dims(ctx->screen, fp, obj,
+ ir3_emit_image_dims(ctx->screen, fs, obj,
&ctx->shaderimg[PIPE_SHADER_FRAGMENT]);
fd6_emit_take_group(emit, obj, FD6_GROUP_IBO, 0x6);
ctx->constbuf[PIPE_SHADER_FRAGMENT].cb);
util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vtx.vertexbuf.vb);
util_blitter_save_vertex_elements(ctx->blitter, ctx->vtx.vtx);
- util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vp);
+ util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vs);
util_blitter_save_so_targets(ctx->blitter, ctx->streamout.num_targets,
ctx->streamout.targets);
util_blitter_save_rasterizer(ctx->blitter, ctx->rasterizer);
util_blitter_save_viewport(ctx->blitter, &ctx->viewport);
util_blitter_save_scissor(ctx->blitter, &ctx->scissor);
- util_blitter_save_fragment_shader(ctx->blitter, ctx->prog.fp);
+ util_blitter_save_fragment_shader(ctx->blitter, ctx->prog.fs);
util_blitter_save_blend(ctx->blitter, ctx->blend);
util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->zsa);
util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
pctx->set_vertex_buffers(pctx, blitter->vb_slot, 1,
&ctx->solid_vbuf_state.vertexbuf.vb[0]);
pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
- pctx->bind_vs_state(pctx, ctx->solid_prog.vp);
- pctx->bind_fs_state(pctx, ctx->solid_prog.fp);
+ pctx->bind_vs_state(pctx, ctx->solid_prog.vs);
+ pctx->bind_fs_state(pctx, ctx->solid_prog.fs);
struct pipe_draw_info info = {
.mode = PIPE_PRIM_MAX, /* maps to DI_PT_RECTLIST */
};
struct fd_program_stateobj {
- void *vp, *fp;
+ void *vs, *fs;
};
struct fd_constbuf_stateobj {
#include "freedreno_context.h"
static void
-fd_fp_state_bind(struct pipe_context *pctx, void *hwcso)
+fd_fs_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct fd_context *ctx = fd_context(pctx);
- ctx->prog.fp = hwcso;
+ ctx->prog.fs = hwcso;
ctx->dirty_shader[PIPE_SHADER_FRAGMENT] |= FD_DIRTY_SHADER_PROG;
ctx->dirty |= FD_DIRTY_PROG;
}
static void
-fd_vp_state_bind(struct pipe_context *pctx, void *hwcso)
+fd_vs_state_bind(struct pipe_context *pctx, void *hwcso)
{
struct fd_context *ctx = fd_context(pctx);
- ctx->prog.vp = hwcso;
+ ctx->prog.vs = hwcso;
ctx->dirty_shader[PIPE_SHADER_VERTEX] |= FD_DIRTY_SHADER_PROG;
ctx->dirty |= FD_DIRTY_PROG;
}
-static const char *solid_fp =
+static const char *solid_fs =
"FRAG \n"
"PROPERTY FS_COLOR0_WRITES_ALL_CBUFS 1 \n"
"DCL CONST[0] \n"
" 0: MOV OUT[0], CONST[0] \n"
" 1: END \n";
-static const char *solid_vp =
+static const char *solid_vs =
"VERT \n"
"DCL IN[0] \n"
"DCL OUT[0], POSITION \n"
" 0: MOV OUT[0], IN[0] \n"
" 1: END \n";
-static const char *blit_vp =
+static const char *blit_vs =
"VERT \n"
"DCL IN[0] \n"
"DCL IN[1] \n"
struct fd_context *ctx = fd_context(pctx);
int i;
- pctx->bind_fs_state = fd_fp_state_bind;
- pctx->bind_vs_state = fd_vp_state_bind;
+ pctx->bind_vs_state = fd_vs_state_bind;
+ pctx->bind_fs_state = fd_fs_state_bind;
- ctx->solid_prog.fp = assemble_tgsi(pctx, solid_fp, true);
- ctx->solid_prog.vp = assemble_tgsi(pctx, solid_vp, false);
- ctx->blit_prog[0].vp = assemble_tgsi(pctx, blit_vp, false);
- ctx->blit_prog[0].fp = fd_prog_blit(pctx, 1, false);
+ ctx->solid_prog.fs = assemble_tgsi(pctx, solid_fs, true);
+ ctx->solid_prog.vs = assemble_tgsi(pctx, solid_vs, false);
+ ctx->blit_prog[0].vs = assemble_tgsi(pctx, blit_vs, false);
+ ctx->blit_prog[0].fs = fd_prog_blit(pctx, 1, false);
if (ctx->screen->gpu_id < 300)
return;
for (i = 1; i < ctx->screen->max_rts; i++) {
- ctx->blit_prog[i].vp = ctx->blit_prog[0].vp;
- ctx->blit_prog[i].fp = fd_prog_blit(pctx, i + 1, false);
+ ctx->blit_prog[i].vs = ctx->blit_prog[0].vs;
+ ctx->blit_prog[i].fs = fd_prog_blit(pctx, i + 1, false);
}
- ctx->blit_z.vp = ctx->blit_prog[0].vp;
- ctx->blit_z.fp = fd_prog_blit(pctx, 0, true);
- ctx->blit_zs.vp = ctx->blit_prog[0].vp;
- ctx->blit_zs.fp = fd_prog_blit(pctx, 1, true);
+ ctx->blit_z.vs = ctx->blit_prog[0].vs;
+ ctx->blit_z.fs = fd_prog_blit(pctx, 0, true);
+ ctx->blit_zs.vs = ctx->blit_prog[0].vs;
+ ctx->blit_zs.fs = fd_prog_blit(pctx, 1, true);
}
void fd_prog_fini(struct pipe_context *pctx)
struct fd_context *ctx = fd_context(pctx);
int i;
- pctx->delete_vs_state(pctx, ctx->solid_prog.vp);
- pctx->delete_fs_state(pctx, ctx->solid_prog.fp);
- pctx->delete_vs_state(pctx, ctx->blit_prog[0].vp);
+ pctx->delete_vs_state(pctx, ctx->solid_prog.vs);
+ pctx->delete_fs_state(pctx, ctx->solid_prog.fs);
+ pctx->delete_vs_state(pctx, ctx->blit_prog[0].vs);
for (i = 0; i < ctx->screen->max_rts; i++)
- pctx->delete_fs_state(pctx, ctx->blit_prog[i].fp);
- pctx->delete_fs_state(pctx, ctx->blit_z.fp);
- pctx->delete_fs_state(pctx, ctx->blit_zs.fp);
+ pctx->delete_fs_state(pctx, ctx->blit_prog[i].fs);
+ pctx->delete_fs_state(pctx, ctx->blit_z.fs);
+ pctx->delete_fs_state(pctx, ctx->blit_zs.fs);
}