OUT_RING(ring, info->max_index); /* VGT_MAX_VTX_INDX */
OUT_RING(ring, info->min_index); /* VGT_MIN_VTX_INDX */
- fd_draw_emit(ctx, ring, ctx->primtypes[info->mode],
+ fd_draw_emit(ctx->batch, ring, ctx->primtypes[info->mode],
IGNORE_VISIBILITY, info);
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
{
struct fd2_context *fd2_ctx = fd2_context(ctx);
struct fd_ringbuffer *ring = ctx->batch->draw;
- struct pipe_framebuffer_state *fb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *fb = &ctx->batch->framebuffer;
uint32_t reg, colr = 0;
if ((buffers & PIPE_CLEAR_COLOR) && fb->nr_cbufs)
OUT_RING(ring, 3); /* VGT_MAX_VTX_INDX */
OUT_RING(ring, 0); /* VGT_MIN_VTX_INDX */
- fd_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd_draw(ctx->batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 3, 0, INDEX_SIZE_IGN, 0, 0, NULL);
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
OUT_RING(ring, xy2d(scissor->maxx, /* PA_SC_WINDOW_SCISSOR_BR */
scissor->maxy));
- ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
- ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
- ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
- ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
+ ctx->batch->max_scissor.minx = MIN2(ctx->batch->max_scissor.minx, scissor->minx);
+ ctx->batch->max_scissor.miny = MIN2(ctx->batch->max_scissor.miny, scissor->miny);
+ ctx->batch->max_scissor.maxx = MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
+ ctx->batch->max_scissor.maxy = MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
}
if (dirty & FD_DIRTY_VIEWPORT) {
/* transfer from gmem to system memory (ie. normal RAM) */
static void
-emit_gmem2mem_surf(struct fd_context *ctx, uint32_t base,
+emit_gmem2mem_surf(struct fd_batch *batch, uint32_t base,
struct pipe_surface *psurf)
{
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct fd_resource *rsc = fd_resource(psurf->texture);
uint32_t swap = fmt2swap(psurf->format);
OUT_RING(ring, 3); /* VGT_MAX_VTX_INDX */
OUT_RING(ring, 0); /* VGT_MIN_VTX_INDX */
- fd_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 3, 0, INDEX_SIZE_IGN, 0, 0, NULL);
}
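/* For reference, the resolve above is just a screen-aligned RECTLIST
 * draw with CP-generated indices; mapping the fd_draw() arguments
 * (signature appears later in this patch):
 *
 *   fd_draw(batch, ring,
 *           DI_PT_RECTLIST,         // primtype: screen-aligned rect
 *           IGNORE_VISIBILITY,      // no binning visibility check
 *           DI_SRC_SEL_AUTO_INDEX,  // CP auto-generates indices
 *           3, 0,                   // vertex count, instance count
 *           INDEX_SIZE_IGN,         // no index buffer...
 *           0, 0, NULL);            // ...so size/offset/prsc unused
 */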
static void
-fd2_emit_tile_gmem2mem(struct fd_context *ctx, struct fd_tile *tile)
+fd2_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
+ struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
fd2_emit_vertex_bufs(ring, 0x9c, (struct fd2_vertex_buf[]) {
{ .prsc = fd2_ctx->solid_vertexbuf, .size = 48 },
OUT_RING(ring, A2XX_RB_COPY_DEST_OFFSET_X(tile->xoff) |
A2XX_RB_COPY_DEST_OFFSET_Y(tile->yoff));
- if (ctx->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
- emit_gmem2mem_surf(ctx, tile->bin_w * tile->bin_h, pfb->zsbuf);
+ if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
+ emit_gmem2mem_surf(batch, tile->bin_w * tile->bin_h, pfb->zsbuf);
- if (ctx->resolve & FD_BUFFER_COLOR)
- emit_gmem2mem_surf(ctx, 0, pfb->cbufs[0]);
+ if (batch->resolve & FD_BUFFER_COLOR)
+ emit_gmem2mem_surf(batch, 0, pfb->cbufs[0]);
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
OUT_RING(ring, CP_REG(REG_A2XX_RB_MODECONTROL));
/* transfer from system memory to gmem */
static void
-emit_mem2gmem_surf(struct fd_context *ctx, uint32_t base,
+emit_mem2gmem_surf(struct fd_batch *batch, uint32_t base,
struct pipe_surface *psurf)
{
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct fd_resource *rsc = fd_resource(psurf->texture);
uint32_t swiz;
OUT_RING(ring, 3); /* VGT_MAX_VTX_INDX */
OUT_RING(ring, 0); /* VGT_MIN_VTX_INDX */
- fd_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 3, 0, INDEX_SIZE_IGN, 0, 0, NULL);
}
static void
-fd2_emit_tile_mem2gmem(struct fd_context *ctx, struct fd_tile *tile)
+fd2_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
+ struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
unsigned bin_w = tile->bin_w;
unsigned bin_h = tile->bin_h;
float x0, y0, x1, y1;
OUT_RING(ring, CP_REG(REG_A2XX_PA_CL_CLIP_CNTL));
OUT_RING(ring, 0x00000000);
- if (fd_gmem_needs_restore(ctx, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
- emit_mem2gmem_surf(ctx, bin_w * bin_h, pfb->zsbuf);
+ if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL))
+ emit_mem2gmem_surf(batch, bin_w * bin_h, pfb->zsbuf);
- if (fd_gmem_needs_restore(ctx, tile, FD_BUFFER_COLOR))
- emit_mem2gmem_surf(ctx, 0, pfb->cbufs[0]);
+ if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR))
+ emit_mem2gmem_surf(batch, 0, pfb->cbufs[0]);
/* TODO blob driver seems to toss in a CACHE_FLUSH after each DRAW_INDX.. */
}
/* before first tile */
static void
-fd2_emit_tile_init(struct fd_context *ctx)
+fd2_emit_tile_init(struct fd_batch *batch)
{
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_context *ctx = batch->ctx;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
enum pipe_format format = pipe_surface_format(pfb->cbufs[0]);
uint32_t reg;
- fd2_emit_restore(ctx, ctx->ring);
+ fd2_emit_restore(ctx, ring);
OUT_PKT3(ring, CP_SET_CONSTANT, 4);
OUT_RING(ring, CP_REG(REG_A2XX_RB_SURFACE_INFO));
/* before mem2gmem */
static void
-fd2_emit_tile_prep(struct fd_context *ctx, struct fd_tile *tile)
+fd2_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
enum pipe_format format = pipe_surface_format(pfb->cbufs[0]);
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
/* before IB to rendering cmds: */
static void
-fd2_emit_tile_renderprep(struct fd_context *ctx, struct fd_tile *tile)
+fd2_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
enum pipe_format format = pipe_surface_format(pfb->cbufs[0]);
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
{
struct fd3_context *fd3_ctx = fd3_context(fd_context(pctx));
- util_dynarray_fini(&fd3_ctx->rbrc_patches);
-
fd_bo_del(fd3_ctx->vs_pvt_mem);
fd_bo_del(fd3_ctx->fs_pvt_mem);
fd_bo_del(fd3_ctx->vsc_size_mem);
if (!pctx)
return NULL;
- util_dynarray_init(&fd3_ctx->rbrc_patches);
-
fd3_ctx->vs_pvt_mem = fd_bo_new(screen->dev, 0x2000,
DRM_FREEDRENO_GEM_TYPE_KMEM);
struct fd3_context {
struct fd_context base;
- /* Keep track of writes to RB_RENDER_CONTROL which need to be patched
- * once we know whether or not to use GMEM, and GMEM tile pitch.
- */
- struct util_dynarray rbrc_patches;
-
struct fd_bo *vs_pvt_mem, *fs_pvt_mem;
/* This only needs to be 4 * num_of_pipes bytes (ie. 32 bytes). We
(info->mode == PIPE_PRIM_POINTS))
primtype = DI_PT_POINTLIST_PSIZE;
- fd_draw_emit(ctx, ring,
- primtype,
+ fd_draw_emit(ctx->batch, ring, primtype,
emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
info);
}
fd3_emit_state(ctx, ring, &emit);
fd3_emit_vertex_bufs(ring, &emit);
- reset_viewport(ring, &ctx->framebuffer);
+ reset_viewport(ring, &ctx->batch->framebuffer);
OUT_PKT0(ring, REG_A3XX_PC_PRIM_VTX_CNTL, 1);
OUT_RING(ring, A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(0) |
fd_event_write(ctx, ring, PERFCOUNTER_STOP);
- fd_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd_draw(ctx->batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 2, 0, INDEX_SIZE_IGN, 0, 0, NULL);
}
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct fd3_context *fd3_ctx = fd3_context(ctx);
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
struct fd_ringbuffer *ring = ctx->batch->draw;
unsigned dirty = ctx->dirty;
unsigned i;
/* emit generic state now: */
fd3_emit_state(ctx, ring, &emit);
- reset_viewport(ring, &ctx->framebuffer);
+ reset_viewport(ring, &ctx->batch->framebuffer);
OUT_PKT0(ring, REG_A3XX_RB_BLEND_ALPHA, 1);
OUT_RING(ring, A3XX_RB_BLEND_ALPHA_UINT(0xff) |
OUT_PKT0(ring, REG_A3XX_RB_RENDER_CONTROL, 1);
OUT_RINGP(ring, A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(FUNC_NEVER),
- &fd3_ctx->rbrc_patches);
+ &ctx->batch->rbrc_patches);
if (buffers & PIPE_CLEAR_DEPTH) {
OUT_PKT0(ring, REG_A3XX_RB_DEPTH_CONTROL, 1);
fd_event_write(ctx, ring, PERFCOUNTER_STOP);
- fd_draw(ctx, ring, DI_PT_RECTLIST, USE_VISIBILITY,
+ fd_draw(ctx->batch, ring, DI_PT_RECTLIST, USE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 2, 0, INDEX_SIZE_IGN, 0, 0, NULL);
}
*/
OUT_PKT0(ring, REG_A3XX_RB_RENDER_CONTROL, 1);
- OUT_RINGP(ring, val, &fd3_context(ctx)->rbrc_patches);
+ OUT_RINGP(ring, val, &ctx->batch->rbrc_patches);
}
if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_STENCIL_REF)) {
OUT_RING(ring, A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(scissor->maxx - 1) |
A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(scissor->maxy - 1));
- ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
- ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
- ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
- ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
+ ctx->batch->max_scissor.minx = MIN2(ctx->batch->max_scissor.minx, scissor->minx);
+ ctx->batch->max_scissor.miny = MIN2(ctx->batch->max_scissor.miny, scissor->miny);
+ ctx->batch->max_scissor.maxx = MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
+ ctx->batch->max_scissor.maxy = MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
}
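/* max_scissor accumulates the union of every draw's scissor in the
 * batch, and later bounds the area the tiling pass must cover. E.g.
 * (hypothetical values): draws scissored to (0,0)-(100,100) and
 * (50,50)-(300,200) leave max_scissor = (0,0)-(300,200), so
 * calculate_tiles() bins a 300x200 region instead of the whole
 * render target.
 */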
if (dirty & FD_DIRTY_VIEWPORT) {
}
if (dirty & (FD_DIRTY_PROG | FD_DIRTY_FRAMEBUFFER | FD_DIRTY_BLEND_DUAL)) {
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
int nr_cbufs = pfb->nr_cbufs;
if (fd3_blend_stateobj(ctx->blend)->rb_render_control &
A3XX_RB_RENDER_CONTROL_DUAL_COLOR_IN_ENABLE)
uint32_t i;
for (i = 0; i < ARRAY_SIZE(blend->rb_mrt); i++) {
- enum pipe_format format = pipe_surface_format(ctx->framebuffer.cbufs[i]);
+ enum pipe_format format =
+ pipe_surface_format(ctx->batch->framebuffer.cbufs[i]);
const struct util_format_description *desc =
util_format_description(format);
bool is_float = util_format_is_float(format);
}
static bool
-use_hw_binning(struct fd_context *ctx)
+use_hw_binning(struct fd_batch *batch)
{
- struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
/* workaround: combining scissor optimization and hw binning
* seems problematic. Seems like we end up with a mismatch
}
/* workaround for (hlsq?) lockup with hw binning on a3xx patchlevel 0 */
-static void update_vsc_pipe(struct fd_context *ctx);
+static void update_vsc_pipe(struct fd_batch *batch);
static void
-emit_binning_workaround(struct fd_context *ctx)
+emit_binning_workaround(struct fd_batch *batch)
{
+ struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct fd3_emit emit = {
.debug = &ctx->debug,
.vtx = &fd3_ctx->solid_vbuf_state,
/* transfer from gmem to system memory (ie. normal RAM) */
static void
-emit_gmem2mem_surf(struct fd_context *ctx,
+emit_gmem2mem_surf(struct fd_batch *batch,
enum adreno_rb_copy_control_mode mode,
bool stencil,
uint32_t base, struct pipe_surface *psurf)
{
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct fd_resource *rsc = fd_resource(psurf->texture);
enum pipe_format format = psurf->format;
if (stencil) {
A3XX_RB_COPY_DEST_INFO_ENDIAN(ENDIAN_NONE) |
A3XX_RB_COPY_DEST_INFO_SWAP(fd3_pipe2swap(format)));
- fd_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 2, 0, INDEX_SIZE_IGN, 0, 0, NULL);
}
static void
-fd3_emit_tile_gmem2mem(struct fd_context *ctx, struct fd_tile *tile)
+fd3_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
+ struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd3_emit emit = {
.debug = &ctx->debug,
.vtx = &fd3_ctx->solid_vbuf_state,
fd3_program_emit(ring, &emit, 0, NULL);
fd3_emit_vertex_bufs(ring, &emit);
- if (ctx->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
+ if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
- if (!rsc->stencil || ctx->resolve & FD_BUFFER_DEPTH)
- emit_gmem2mem_surf(ctx, RB_COPY_DEPTH_STENCIL, false,
+ if (!rsc->stencil || batch->resolve & FD_BUFFER_DEPTH)
+ emit_gmem2mem_surf(batch, RB_COPY_DEPTH_STENCIL, false,
ctx->gmem.zsbuf_base[0], pfb->zsbuf);
- if (rsc->stencil && ctx->resolve & FD_BUFFER_STENCIL)
- emit_gmem2mem_surf(ctx, RB_COPY_DEPTH_STENCIL, true,
+ if (rsc->stencil && batch->resolve & FD_BUFFER_STENCIL)
+ emit_gmem2mem_surf(batch, RB_COPY_DEPTH_STENCIL, true,
ctx->gmem.zsbuf_base[1], pfb->zsbuf);
}
- if (ctx->resolve & FD_BUFFER_COLOR) {
+ if (batch->resolve & FD_BUFFER_COLOR) {
for (i = 0; i < pfb->nr_cbufs; i++) {
if (!pfb->cbufs[i])
continue;
- if (!(ctx->resolve & (PIPE_CLEAR_COLOR0 << i)))
+ if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
continue;
- emit_gmem2mem_surf(ctx, RB_COPY_RESOLVE, false,
+ emit_gmem2mem_surf(batch, RB_COPY_RESOLVE, false,
ctx->gmem.cbuf_base[i], pfb->cbufs[i]);
}
}
/* transfer from system memory to gmem */
static void
-emit_mem2gmem_surf(struct fd_context *ctx, uint32_t bases[],
+emit_mem2gmem_surf(struct fd_batch *batch, uint32_t bases[],
struct pipe_surface **psurf, uint32_t bufs, uint32_t bin_w)
{
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct pipe_surface *zsbufs[2];
assert(bufs > 0);
OUT_PKT0(ring, REG_A3XX_RB_DEPTH_INFO, 2);
OUT_RING(ring, A3XX_RB_DEPTH_INFO_DEPTH_BASE(bases[0]) |
A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(DEPTHX_32));
- OUT_RING(ring, A3XX_RB_DEPTH_PITCH(4 * ctx->gmem.bin_w));
+ OUT_RING(ring, A3XX_RB_DEPTH_PITCH(4 * batch->ctx->gmem.bin_w));
if (psurf[0]->format == PIPE_FORMAT_Z32_FLOAT) {
OUT_PKT0(ring, REG_A3XX_RB_MRT_CONTROL(0), 1);
fd3_emit_gmem_restore_tex(ring, psurf, bufs);
- fd_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 2, 0, INDEX_SIZE_IGN, 0, 0, NULL);
}
static void
-fd3_emit_tile_mem2gmem(struct fd_context *ctx, struct fd_tile *tile)
+fd3_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
+ struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd3_emit emit = {
.debug = &ctx->debug,
.vtx = &fd3_ctx->blit_vbuf_state,
bin_w = gmem->bin_w;
bin_h = gmem->bin_h;
- if (fd_gmem_needs_restore(ctx, tile, FD_BUFFER_COLOR)) {
+ if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR)) {
emit.prog = &ctx->blit_prog[pfb->nr_cbufs - 1];
emit.fp = NULL; /* frag shader changed so clear cache */
fd3_program_emit(ring, &emit, pfb->nr_cbufs, pfb->cbufs);
- emit_mem2gmem_surf(ctx, gmem->cbuf_base, pfb->cbufs, pfb->nr_cbufs, bin_w);
+ emit_mem2gmem_surf(batch, gmem->cbuf_base, pfb->cbufs, pfb->nr_cbufs, bin_w);
}
- if (fd_gmem_needs_restore(ctx, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
+ if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
if (pfb->zsbuf->format != PIPE_FORMAT_Z32_FLOAT_S8X24_UINT &&
pfb->zsbuf->format != PIPE_FORMAT_Z32_FLOAT) {
/* Non-float can use a regular color write. It's split over 8-bit
}
emit.fp = NULL; /* frag shader changed so clear cache */
fd3_program_emit(ring, &emit, 1, &pfb->zsbuf);
- emit_mem2gmem_surf(ctx, gmem->zsbuf_base, &pfb->zsbuf, 1, bin_w);
+ emit_mem2gmem_surf(batch, gmem->zsbuf_base, &pfb->zsbuf, 1, bin_w);
}
OUT_PKT0(ring, REG_A3XX_GRAS_SC_CONTROL, 1);
}
static void
-patch_draws(struct fd_context *ctx, enum pc_di_vis_cull_mode vismode)
+patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
{
unsigned i;
- for (i = 0; i < fd_patch_num_elements(&ctx->draw_patches); i++) {
- struct fd_cs_patch *patch = fd_patch_element(&ctx->draw_patches, i);
+ for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
+ struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
*patch->cs = patch->val | DRAW(0, 0, 0, vismode, 0);
}
- util_dynarray_resize(&ctx->draw_patches, 0);
+ util_dynarray_resize(&batch->draw_patches, 0);
}
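/* A minimal sketch of the deferred-patch mechanism (fd_cs_patch per
 * freedreno_util.h): OUT_RINGP() emits a placeholder dword and records
 * where it lives, so the DRAW initiator can be completed at flush time
 * once we know whether hw binning is used:
 *
 *   struct fd_cs_patch { uint32_t *cs; uint32_t val; };
 *
 *   // draw time, vismode not yet known:
 *   OUT_RINGP(ring, DRAW(primtype, src_sel, idx_type, 0, instances),
 *           &batch->draw_patches);
 *
 *   // flush time, in fd3_emit_tile_init():
 *   patch_draws(batch, use_hw_binning(batch) ? USE_VISIBILITY
 *           : IGNORE_VISIBILITY);
 */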
static void
-patch_rbrc(struct fd_context *ctx, uint32_t val)
+patch_rbrc(struct fd_batch *batch, uint32_t val)
{
- struct fd3_context *fd3_ctx = fd3_context(ctx);
unsigned i;
- for (i = 0; i < fd_patch_num_elements(&fd3_ctx->rbrc_patches); i++) {
- struct fd_cs_patch *patch = fd_patch_element(&fd3_ctx->rbrc_patches, i);
+ for (i = 0; i < fd_patch_num_elements(&batch->rbrc_patches); i++) {
+ struct fd_cs_patch *patch = fd_patch_element(&batch->rbrc_patches, i);
*patch->cs = patch->val | val;
}
- util_dynarray_resize(&fd3_ctx->rbrc_patches, 0);
+ util_dynarray_resize(&batch->rbrc_patches, 0);
}
/* for rendering directly to system memory: */
static void
-fd3_emit_sysmem_prep(struct fd_context *ctx)
+fd3_emit_sysmem_prep(struct fd_batch *batch)
{
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
- struct fd_ringbuffer *ring = ctx->ring;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
uint32_t i, pitch = 0;
for (i = 0; i < pfb->nr_cbufs; i++) {
pitch = fd_resource(psurf->texture)->slices[psurf->u.tex.level].pitch;
}
- fd3_emit_restore(ctx, ring);
+ fd3_emit_restore(batch->ctx, ring);
OUT_PKT0(ring, REG_A3XX_RB_FRAME_BUFFER_DIMENSION, 1);
OUT_RING(ring, A3XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(pfb->width) |
A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE |
A3XX_RB_MODE_CONTROL_MRT(MAX2(1, pfb->nr_cbufs) - 1));
- patch_draws(ctx, IGNORE_VISIBILITY);
- patch_rbrc(ctx, A3XX_RB_RENDER_CONTROL_BIN_WIDTH(pitch));
+ patch_draws(batch, IGNORE_VISIBILITY);
+ patch_rbrc(batch, A3XX_RB_RENDER_CONTROL_BIN_WIDTH(pitch));
}
static void
-update_vsc_pipe(struct fd_context *ctx)
+update_vsc_pipe(struct fd_batch *batch)
{
+ struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
int i;
OUT_PKT0(ring, REG_A3XX_VSC_SIZE_ADDRESS, 1);
}
static void
-emit_binning_pass(struct fd_context *ctx)
+emit_binning_pass(struct fd_batch *batch)
{
+ struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
- struct fd_batch *batch = ctx->batch;
- struct fd_ringbuffer *ring = ctx->ring;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
int i;
uint32_t x1 = gmem->minx;
uint32_t y2 = gmem->miny + gmem->height - 1;
if (ctx->screen->gpu_id == 320) {
- emit_binning_workaround(ctx);
+ emit_binning_workaround(batch);
fd_wfi(ctx, ring);
OUT_PKT3(ring, CP_INVALIDATE_STATE, 1);
OUT_RING(ring, 0x00007fff);
fd_wfi(ctx, ring);
if (ctx->screen->gpu_id == 320) {
- emit_binning_workaround(ctx);
+ emit_binning_workaround(batch);
}
}
/* before first tile */
static void
-fd3_emit_tile_init(struct fd_context *ctx)
+fd3_emit_tile_init(struct fd_batch *batch)
{
- struct fd_ringbuffer *ring = ctx->ring;
- struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
uint32_t rb_render_control;
- fd3_emit_restore(ctx, ring);
+ fd3_emit_restore(batch->ctx, ring);
/* note: use gmem->bin_w/h, since the bin_w/h parameters may be truncated
* at the right and bottom edge tiles
OUT_RING(ring, A3XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
A3XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
- update_vsc_pipe(ctx);
+ update_vsc_pipe(batch);
- if (use_hw_binning(ctx)) {
+ if (use_hw_binning(batch)) {
/* emit hw binning pass: */
- emit_binning_pass(ctx);
+ emit_binning_pass(batch);
- patch_draws(ctx, USE_VISIBILITY);
+ patch_draws(batch, USE_VISIBILITY);
} else {
- patch_draws(ctx, IGNORE_VISIBILITY);
+ patch_draws(batch, IGNORE_VISIBILITY);
}
rb_render_control = A3XX_RB_RENDER_CONTROL_ENABLE_GMEM |
A3XX_RB_RENDER_CONTROL_BIN_WIDTH(gmem->bin_w);
- patch_rbrc(ctx, rb_render_control);
+ patch_rbrc(batch, rb_render_control);
}
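/* So the per-batch control flow above boils down to (recap):
 *
 *   update_vsc_pipe(batch);                // program VSC pipes
 *   if (use_hw_binning(batch)) {
 *           emit_binning_pass(batch);      // fills visibility streams
 *           patch_draws(batch, USE_VISIBILITY);
 *   } else {
 *           patch_draws(batch, IGNORE_VISIBILITY);
 *   }
 *   patch_rbrc(batch, rb_render_control);  // bin pitch now known
 */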
/* before mem2gmem */
static void
-fd3_emit_tile_prep(struct fd_context *ctx, struct fd_tile *tile)
+fd3_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_context *ctx = batch->ctx;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
if (ctx->needs_rb_fbd) {
fd_wfi(ctx, ring);
/* before IB to rendering cmds: */
static void
-fd3_emit_tile_renderprep(struct fd_context *ctx, struct fd_tile *tile)
+fd3_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
+ struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
uint32_t x1 = tile->xoff;
uint32_t y1 = tile->yoff;
OUT_RING(ring, 0x00000000);
}
- if (use_hw_binning(ctx)) {
+ if (use_hw_binning(batch)) {
struct fd_vsc_pipe *pipe = &ctx->pipe[tile->p];
assert(pipe->w * pipe->h);
(info->mode == PIPE_PRIM_POINTS))
primtype = DI_PT_POINTLIST_PSIZE;
- fd4_draw_emit(ctx, ring,
- primtype,
+ fd4_draw_emit(ctx->batch, ring, primtype,
emit->key.binning_pass ? IGNORE_VISIBILITY : USE_VISIBILITY,
info);
}
fd4_emit_state(ctx, ring, &emit);
fd4_emit_vertex_bufs(ring, &emit);
- reset_viewport(ring, &ctx->framebuffer);
+ reset_viewport(ring, &ctx->batch->framebuffer);
OUT_PKT0(ring, REG_A4XX_PC_PRIM_VTX_CNTL, 2);
OUT_RING(ring, A4XX_PC_PRIM_VTX_CNTL_VAROUT(0) |
OUT_PKT0(ring, REG_A4XX_GRAS_ALPHA_CONTROL, 1);
OUT_RING(ring, 0x00000002);
- fd4_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd4_draw(ctx->batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 2, 1, INDEX_SIZE_IGN, 0, 0, NULL);
}
{
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd_ringbuffer *ring = ctx->batch->draw;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
unsigned char mrt_comp[A4XX_MAX_RENDER_TARGETS] = {0};
unsigned dirty = ctx->dirty;
unsigned i;
OUT_PKT3(ring, CP_UNKNOWN_1A, 1);
OUT_RING(ring, 0x00000001);
- fd4_draw(ctx, ring, DI_PT_RECTLIST, USE_VISIBILITY,
+ fd4_draw(ctx->batch, ring, DI_PT_RECTLIST, USE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 2, 1, INDEX_SIZE_IGN, 0, 0, NULL);
OUT_PKT3(ring, CP_UNKNOWN_1A, 1);
}
static inline void
-fd4_draw(struct fd_context *ctx, struct fd_ringbuffer *ring,
+fd4_draw(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
enum pc_di_src_sel src_sel, uint32_t count,
* we know if we are binning or not
*/
OUT_RINGP(ring, DRAW4(primtype, src_sel, idx_type, 0),
- &ctx->draw_patches);
+ &batch->draw_patches);
} else {
OUT_RING(ring, DRAW4(primtype, src_sel, idx_type, vismode));
}
emit_marker(ring, 7);
- fd_reset_wfi(ctx);
+ fd_reset_wfi(batch->ctx);
}
return INDEX4_SIZE_32_BIT;
}
static inline void
-fd4_draw_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
+fd4_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
const struct pipe_draw_info *info)
{
- struct pipe_index_buffer *idx = &ctx->indexbuf;
struct pipe_resource *idx_buffer = NULL;
enum a4xx_index_size idx_type;
enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset;
if (info->indexed) {
+ struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
+
assert(!idx->user_buffer);
idx_buffer = idx->buffer;
src_sel = DI_SRC_SEL_AUTO_INDEX;
}
- fd4_draw(ctx, ring, primtype, vismode, src_sel,
+ fd4_draw(batch, ring, primtype, vismode, src_sel,
info->count, info->instance_count,
idx_type, idx_size, idx_offset, idx_buffer);
}
emit_marker(ring, 5);
if ((dirty & FD_DIRTY_FRAMEBUFFER) && !emit->key.binning_pass) {
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
unsigned char mrt_comp[A4XX_MAX_RENDER_TARGETS] = {0};
for (unsigned i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
if (dirty & (FD_DIRTY_ZSA | FD_DIRTY_FRAMEBUFFER)) {
struct fd4_zsa_stateobj *zsa = fd4_zsa_stateobj(ctx->zsa);
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
uint32_t rb_alpha_control = zsa->rb_alpha_control;
if (util_format_is_pure_integer(pipe_surface_format(pfb->cbufs[0])))
OUT_RING(ring, A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(scissor->minx) |
A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(scissor->miny));
- ctx->max_scissor.minx = MIN2(ctx->max_scissor.minx, scissor->minx);
- ctx->max_scissor.miny = MIN2(ctx->max_scissor.miny, scissor->miny);
- ctx->max_scissor.maxx = MAX2(ctx->max_scissor.maxx, scissor->maxx);
- ctx->max_scissor.maxy = MAX2(ctx->max_scissor.maxy, scissor->maxy);
+ ctx->batch->max_scissor.minx = MIN2(ctx->batch->max_scissor.minx, scissor->minx);
+ ctx->batch->max_scissor.miny = MIN2(ctx->batch->max_scissor.miny, scissor->miny);
+ ctx->batch->max_scissor.maxx = MAX2(ctx->batch->max_scissor.maxx, scissor->maxx);
+ ctx->batch->max_scissor.maxy = MAX2(ctx->batch->max_scissor.maxy, scissor->maxy);
}
if (dirty & FD_DIRTY_VIEWPORT) {
}
if (dirty & (FD_DIRTY_PROG | FD_DIRTY_FRAMEBUFFER)) {
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
unsigned n = pfb->nr_cbufs;
/* if we have depth/stencil, we need at least one MRT: */
if (pfb->zsbuf)
for (i = 0; i < A4XX_MAX_RENDER_TARGETS; i++) {
enum pipe_format format = pipe_surface_format(
- ctx->framebuffer.cbufs[i]);
+ ctx->batch->framebuffer.cbufs[i]);
bool is_int = util_format_is_pure_integer(format);
bool has_alpha = util_format_has_alpha(format);
uint32_t control = blend->rb_mrt[i].control;
}
static bool
-use_hw_binning(struct fd_context *ctx)
+use_hw_binning(struct fd_batch *batch)
{
- struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
/* this seems to be a hw bug.. but this hack fixes piglit fbo-maxsize: */
if ((pfb->width > 4096) && (pfb->height > 4096))
/* transfer from gmem to system memory (ie. normal RAM) */
static void
-emit_gmem2mem_surf(struct fd_context *ctx, bool stencil,
+emit_gmem2mem_surf(struct fd_batch *batch, bool stencil,
uint32_t base, struct pipe_surface *psurf)
{
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct fd_resource *rsc = fd_resource(psurf->texture);
enum pipe_format pformat = psurf->format;
struct fd_resource_slice *slice;
A4XX_RB_COPY_DEST_INFO_ENDIAN(ENDIAN_NONE) |
A4XX_RB_COPY_DEST_INFO_SWAP(fd4_pipe2swap(pformat)));
- fd4_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd4_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 2, 1, INDEX_SIZE_IGN, 0, 0, NULL);
}
static void
-fd4_emit_tile_gmem2mem(struct fd_context *ctx, struct fd_tile *tile)
+fd4_emit_tile_gmem2mem(struct fd_batch *batch, struct fd_tile *tile)
{
+ struct fd_context *ctx = batch->ctx;
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd4_emit emit = {
.debug = &ctx->debug,
.vtx = &fd4_ctx->solid_vbuf_state,
fd4_program_emit(ring, &emit, 0, NULL);
fd4_emit_vertex_bufs(ring, &emit);
- if (ctx->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
+ if (batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
struct fd_resource *rsc = fd_resource(pfb->zsbuf->texture);
- if (!rsc->stencil || (ctx->resolve & FD_BUFFER_DEPTH))
- emit_gmem2mem_surf(ctx, false, ctx->gmem.zsbuf_base[0], pfb->zsbuf);
- if (rsc->stencil && (ctx->resolve & FD_BUFFER_STENCIL))
- emit_gmem2mem_surf(ctx, true, ctx->gmem.zsbuf_base[1], pfb->zsbuf);
+ if (!rsc->stencil || (batch->resolve & FD_BUFFER_DEPTH))
+ emit_gmem2mem_surf(batch, false, ctx->gmem.zsbuf_base[0], pfb->zsbuf);
+ if (rsc->stencil && (batch->resolve & FD_BUFFER_STENCIL))
+ emit_gmem2mem_surf(batch, true, ctx->gmem.zsbuf_base[1], pfb->zsbuf);
}
- if (ctx->resolve & FD_BUFFER_COLOR) {
+ if (batch->resolve & FD_BUFFER_COLOR) {
unsigned i;
for (i = 0; i < pfb->nr_cbufs; i++) {
if (!pfb->cbufs[i])
continue;
- if (!(ctx->resolve & (PIPE_CLEAR_COLOR0 << i)))
+ if (!(batch->resolve & (PIPE_CLEAR_COLOR0 << i)))
continue;
- emit_gmem2mem_surf(ctx, false, gmem->cbuf_base[i], pfb->cbufs[i]);
+ emit_gmem2mem_surf(batch, false, gmem->cbuf_base[i], pfb->cbufs[i]);
}
}
/* transfer from system memory to gmem */
static void
-emit_mem2gmem_surf(struct fd_context *ctx, uint32_t *bases,
+emit_mem2gmem_surf(struct fd_batch *batch, uint32_t *bases,
struct pipe_surface **bufs, uint32_t nr_bufs, uint32_t bin_w)
{
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct pipe_surface *zsbufs[2];
emit_mrt(ring, nr_bufs, bufs, bases, bin_w, false);
fd4_emit_gmem_restore_tex(ring, nr_bufs, bufs);
- fd4_draw(ctx, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
+ fd4_draw(batch, ring, DI_PT_RECTLIST, IGNORE_VISIBILITY,
DI_SRC_SEL_AUTO_INDEX, 2, 1, INDEX_SIZE_IGN, 0, 0, NULL);
}
static void
-fd4_emit_tile_mem2gmem(struct fd_context *ctx, struct fd_tile *tile)
+fd4_emit_tile_mem2gmem(struct fd_batch *batch, struct fd_tile *tile)
{
+ struct fd_context *ctx = batch->ctx;
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd4_emit emit = {
.debug = &ctx->debug,
.vtx = &fd4_ctx->blit_vbuf_state,
bin_w = gmem->bin_w;
bin_h = gmem->bin_h;
- if (fd_gmem_needs_restore(ctx, tile, FD_BUFFER_COLOR)) {
+ if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_COLOR)) {
emit.prog = &ctx->blit_prog[pfb->nr_cbufs - 1];
emit.fp = NULL; /* frag shader changed so clear cache */
fd4_program_emit(ring, &emit, pfb->nr_cbufs, pfb->cbufs);
- emit_mem2gmem_surf(ctx, gmem->cbuf_base, pfb->cbufs, pfb->nr_cbufs, bin_w);
+ emit_mem2gmem_surf(batch, gmem->cbuf_base, pfb->cbufs, pfb->nr_cbufs, bin_w);
}
- if (fd_gmem_needs_restore(ctx, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
+ if (fd_gmem_needs_restore(batch, tile, FD_BUFFER_DEPTH | FD_BUFFER_STENCIL)) {
switch (pfb->zsbuf->format) {
case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
case PIPE_FORMAT_Z32_FLOAT:
}
emit.fp = NULL; /* frag shader changed so clear cache */
fd4_program_emit(ring, &emit, 1, &pfb->zsbuf);
- emit_mem2gmem_surf(ctx, gmem->zsbuf_base, &pfb->zsbuf, 1, bin_w);
+ emit_mem2gmem_surf(batch, gmem->zsbuf_base, &pfb->zsbuf, 1, bin_w);
}
OUT_PKT0(ring, REG_A4XX_GRAS_SC_CONTROL, 1);
}
static void
-patch_draws(struct fd_context *ctx, enum pc_di_vis_cull_mode vismode)
+patch_draws(struct fd_batch *batch, enum pc_di_vis_cull_mode vismode)
{
unsigned i;
- for (i = 0; i < fd_patch_num_elements(&ctx->draw_patches); i++) {
- struct fd_cs_patch *patch = fd_patch_element(&ctx->draw_patches, i);
+ for (i = 0; i < fd_patch_num_elements(&batch->draw_patches); i++) {
+ struct fd_cs_patch *patch = fd_patch_element(&batch->draw_patches, i);
*patch->cs = patch->val | DRAW4(0, 0, 0, vismode);
}
- util_dynarray_resize(&ctx->draw_patches, 0);
+ util_dynarray_resize(&batch->draw_patches, 0);
}
/* for rendering directly to system memory: */
static void
-fd4_emit_sysmem_prep(struct fd_context *ctx)
+fd4_emit_sysmem_prep(struct fd_batch *batch)
{
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
- struct fd_ringbuffer *ring = ctx->ring;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
- fd4_emit_restore(ctx, ring);
+ fd4_emit_restore(batch->ctx, ring);
OUT_PKT0(ring, REG_A4XX_RB_FRAME_BUFFER_DIMENSION, 1);
OUT_RING(ring, A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(pfb->width) |
OUT_PKT0(ring, REG_A4XX_RB_RENDER_CONTROL, 1);
OUT_RING(ring, 0x8);
- patch_draws(ctx, IGNORE_VISIBILITY);
+ patch_draws(batch, IGNORE_VISIBILITY);
}
static void
-update_vsc_pipe(struct fd_context *ctx)
+update_vsc_pipe(struct fd_batch *batch)
{
+ struct fd_context *ctx = batch->ctx;
struct fd4_context *fd4_ctx = fd4_context(ctx);
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
int i;
OUT_PKT0(ring, REG_A4XX_VSC_SIZE_ADDRESS, 1);
}
static void
-emit_binning_pass(struct fd_context *ctx)
+emit_binning_pass(struct fd_batch *batch)
{
+ struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
- struct fd_batch *batch = ctx->batch;
- struct fd_ringbuffer *ring = ctx->ring;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
+ struct fd_ringbuffer *ring = batch->gmem;
int i;
uint32_t x1 = gmem->minx;
/* before first tile */
static void
-fd4_emit_tile_init(struct fd_context *ctx)
+fd4_emit_tile_init(struct fd_batch *batch)
{
- struct fd_ringbuffer *ring = ctx->ring;
- struct fd_gmem_stateobj *gmem = &ctx->gmem;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct fd_gmem_stateobj *gmem = &batch->ctx->gmem;
- fd4_emit_restore(ctx, ring);
+ fd4_emit_restore(batch->ctx, ring);
OUT_PKT0(ring, REG_A4XX_VSC_BIN_SIZE, 1);
OUT_RING(ring, A4XX_VSC_BIN_SIZE_WIDTH(gmem->bin_w) |
A4XX_VSC_BIN_SIZE_HEIGHT(gmem->bin_h));
- update_vsc_pipe(ctx);
+ update_vsc_pipe(batch);
- if (use_hw_binning(ctx)) {
+ if (use_hw_binning(batch)) {
OUT_PKT0(ring, REG_A4XX_RB_MODE_CONTROL, 1);
OUT_RING(ring, A4XX_RB_MODE_CONTROL_WIDTH(gmem->bin_w) |
A4XX_RB_MODE_CONTROL_HEIGHT(gmem->bin_h));
0x8);
/* emit hw binning pass: */
- emit_binning_pass(ctx);
+ emit_binning_pass(batch);
- patch_draws(ctx, USE_VISIBILITY);
+ patch_draws(batch, USE_VISIBILITY);
} else {
- patch_draws(ctx, IGNORE_VISIBILITY);
+ patch_draws(batch, IGNORE_VISIBILITY);
}
OUT_PKT0(ring, REG_A4XX_RB_MODE_CONTROL, 1);
/* before mem2gmem */
static void
-fd4_emit_tile_prep(struct fd_context *ctx, struct fd_tile *tile)
+fd4_emit_tile_prep(struct fd_batch *batch, struct fd_tile *tile)
{
- struct fd_ringbuffer *ring = ctx->ring;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_context *ctx = batch->ctx;
+ struct fd_ringbuffer *ring = batch->gmem;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
if (pfb->zsbuf) {
/* before IB to rendering cmds: */
static void
-fd4_emit_tile_renderprep(struct fd_context *ctx, struct fd_tile *tile)
+fd4_emit_tile_renderprep(struct fd_batch *batch, struct fd_tile *tile)
{
+ struct fd_context *ctx = batch->ctx;
struct fd4_context *fd4_ctx = fd4_context(ctx);
- struct fd_ringbuffer *ring = ctx->ring;
+ struct fd_ringbuffer *ring = batch->gmem;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
uint32_t x1 = tile->xoff;
uint32_t y1 = tile->yoff;
uint32_t x2 = tile->xoff + tile->bin_w - 1;
uint32_t y2 = tile->yoff + tile->bin_h - 1;
- if (use_hw_binning(ctx)) {
+ if (use_hw_binning(batch)) {
struct fd_vsc_pipe *pipe = &ctx->pipe[tile->p];
assert(pipe->w * pipe->h);
list_inithead(&batch->used_resources);
+ /* reset maximal bounds: */
+ batch->max_scissor.minx = batch->max_scissor.miny = ~0;
+ batch->max_scissor.maxx = batch->max_scissor.maxy = 0;
+
+ util_dynarray_init(&batch->draw_patches);
+
+ if (is_a3xx(ctx->screen))
+ util_dynarray_init(&batch->rbrc_patches);
+
return batch;
}
void
__fd_batch_destroy(struct fd_batch *batch)
{
+ util_copy_framebuffer_state(&batch->framebuffer, NULL);
fd_ringbuffer_del(batch->draw);
fd_ringbuffer_del(batch->binning);
fd_ringbuffer_del(batch->gmem);
+ util_dynarray_fini(&batch->draw_patches);
+
+ if (is_a3xx(batch->ctx->screen))
+ util_dynarray_fini(&batch->rbrc_patches);
+
free(batch);
}
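/* Callers never invoke __fd_batch_destroy() directly; it is reached via
 * the usual gallium pipe_reference idiom. A minimal sketch of the
 * helper assumed here (per freedreno_batch.h):
 *
 *   static inline void
 *   fd_batch_reference(struct fd_batch **ptr, struct fd_batch *batch)
 *   {
 *           struct fd_batch *old_batch = *ptr;
 *           if (pipe_reference(&(*ptr)->reference, &batch->reference))
 *                   __fd_batch_destroy(old_batch);
 *           *ptr = batch;
 *   }
 */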
{
struct fd_resource *rsc, *rsc_tmp;
- fd_gmem_render_tiles(batch->ctx);
+ DBG("%p: needs_flush=%d", batch, batch->needs_flush);
+
+ if (!batch->needs_flush)
+ return;
+
+ fd_gmem_render_tiles(batch);
/* go through all the used resources and clear their reading flag */
LIST_FOR_EACH_ENTRY_SAFE(rsc, rsc_tmp, &batch->used_resources, list) {
struct fd_batch {
struct pipe_reference reference;
unsigned seqno;
+
struct fd_context *ctx;
+ /* do we need to mem2gmem before rendering. We don't, if for example,
+ * there was a glClear() that invalidated the entire previous buffer
+ * contents. Keep track of which buffer(s) are cleared, or need
+ * restore. Masks of PIPE_CLEAR_*
+ *
+ * The 'cleared' bits will be set for buffers which are *entirely*
+ * cleared, and 'partial_cleared' bits will be set if you must
+ * check cleared_scissor.
+ */
+ enum {
+ /* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
+ FD_BUFFER_COLOR = PIPE_CLEAR_COLOR,
+ FD_BUFFER_DEPTH = PIPE_CLEAR_DEPTH,
+ FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
+ FD_BUFFER_ALL = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
+ } cleared, partial_cleared, restore, resolve;
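+
+ /* Worked example: a full-screen glClear(COLOR|DEPTH) sets
+ * cleared=COLOR|DEPTH; a later depth-tested draw adds COLOR|DEPTH to
+ * resolve but nothing to restore (both buffers were entirely cleared,
+ * so no mem2gmem is needed), while a draw touching an uncleared
+ * stencil buffer would add STENCIL to restore, forcing a per-tile
+ * mem2gmem of stencil.
+ */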
+
+ bool needs_flush;
+
+ /* To decide whether to render to system memory, keep track of the
+ * number of draws, and whether any of them require multisample,
+ * depth_test (or depth write), stencil_test, blending, and
+ * color_logic_op (since those functions are disabled when
+ * bypassing GMEM).
+ */
+ enum {
+ FD_GMEM_CLEARS_DEPTH_STENCIL = 0x01,
+ FD_GMEM_DEPTH_ENABLED = 0x02,
+ FD_GMEM_STENCIL_ENABLED = 0x04,
+
+ FD_GMEM_MSAA_ENABLED = 0x08,
+ FD_GMEM_BLEND_ENABLED = 0x10,
+ FD_GMEM_LOGICOP_ENABLED = 0x20,
+ } gmem_reason;
+ unsigned num_draws; /* number of draws in current batch */
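+
+ /* Example of how gmem_reason is consumed (see fd_gmem_render_tiles()):
+ * a batch with no clears, gmem_reason == 0 and num_draws <= 5 is cheap
+ * enough to render directly to system memory and skip tiling entirely;
+ * any depth/stencil/blend/MSAA/logicop use sets a bit here and forces
+ * the GMEM path.
+ */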
+
+ /* Track the maximal bounds of the scissor of all the draws within a
+ * batch. Used at the tile rendering step (fd_gmem_render_tiles(),
+ * mem2gmem/gmem2mem) to avoid needlessly moving data in/out of gmem.
+ */
+ struct pipe_scissor_state max_scissor;
+
+ /* Track the cleared scissor for color/depth/stencil, so we know
+ * which, if any, tiles need to be restored (mem2gmem). Only valid
+ * if the corresponding bit in batch->partial_cleared is set.
+ */
+ struct {
+ struct pipe_scissor_state color, depth, stencil;
+ } cleared_scissor;
+
+ /* Keep track of DRAW initiators that need to be patched up depending
+ * on whether we're using binning or not:
+ */
+ struct util_dynarray draw_patches;
+
+ /* Keep track of writes to RB_RENDER_CONTROL which need to be patched
+ * once we know whether or not to use GMEM, and GMEM tile pitch.
+ *
+ * (only for a3xx.. but having gen specific subclasses of fd_batch
+ * seemed overkill for now)
+ */
+ struct util_dynarray rbrc_patches;
+
+ struct pipe_framebuffer_state framebuffer;
+
/** draw pass cmdstream: */
struct fd_ringbuffer *draw;
/** binning pass cmdstream: */
fd_context_render(struct pipe_context *pctx)
{
struct fd_context *ctx = fd_context(pctx);
-
- DBG("needs_flush: %d", ctx->needs_flush);
-
- if (!ctx->needs_flush)
- return;
+ struct fd_batch *new_batch;
fd_batch_flush(ctx->batch);
+ new_batch = fd_batch_create(ctx);
+ util_copy_framebuffer_state(&new_batch->framebuffer, &ctx->batch->framebuffer);
fd_batch_reference(&ctx->batch, NULL);
- ctx->batch = fd_batch_create(ctx);
-
- ctx->needs_flush = false;
- ctx->cleared = ctx->partial_cleared = ctx->restore = ctx->resolve = 0;
- ctx->gmem_reason = 0;
- ctx->num_draws = 0;
+ ctx->batch = new_batch;
}
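/* Batch lifecycle, sketched: nothing is reset on the context anymore;
 * the old batch is flushed and unreferenced, and a fresh batch (with
 * zeroed cleared/restore/resolve, gmem_reason, num_draws, ...) takes
 * its place, inheriting only the current framebuffer state via
 * util_copy_framebuffer_state().
 */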
static void
fd_prog_fini(pctx);
fd_hw_query_fini(pctx);
- util_dynarray_fini(&ctx->draw_patches);
-
if (ctx->blitter)
util_blitter_destroy(ctx->blitter);
fd_reset_wfi(ctx);
- util_dynarray_init(&ctx->draw_patches);
-
util_slab_create(&ctx->transfer_pool, sizeof(struct fd_transfer),
16, UTIL_SLAB_SINGLETHREADED);
struct fd_program_stateobj blit_prog[MAX_RENDER_TARGETS]; // TODO move to screen?
struct fd_program_stateobj blit_z, blit_zs;
- /* do we need to mem2gmem before rendering. We don't, if for example,
- * there was a glClear() that invalidated the entire previous buffer
- * contents. Keep track of which buffer(s) are cleared, or needs
- * restore. Masks of PIPE_CLEAR_*
- *
- * The 'cleared' bits will be set for buffers which are *entirely*
- * cleared, and 'partial_cleared' bits will be set if you must
- * check cleared_scissor.
- */
- enum {
- /* align bitmask values w/ PIPE_CLEAR_*.. since that is convenient.. */
- FD_BUFFER_COLOR = PIPE_CLEAR_COLOR,
- FD_BUFFER_DEPTH = PIPE_CLEAR_DEPTH,
- FD_BUFFER_STENCIL = PIPE_CLEAR_STENCIL,
- FD_BUFFER_ALL = FD_BUFFER_COLOR | FD_BUFFER_DEPTH | FD_BUFFER_STENCIL,
- } cleared, partial_cleared, restore, resolve;
-
- bool needs_flush;
-
- /* To decide whether to render to system memory, keep track of the
- * number of draws, and whether any of them require multisample,
- * depth_test (or depth write), stencil_test, blending, and
- * color_logic_Op (since those functions are disabled when by-
- * passing GMEM.
- */
- enum {
- FD_GMEM_CLEARS_DEPTH_STENCIL = 0x01,
- FD_GMEM_DEPTH_ENABLED = 0x02,
- FD_GMEM_STENCIL_ENABLED = 0x04,
-
- FD_GMEM_MSAA_ENABLED = 0x08,
- FD_GMEM_BLEND_ENABLED = 0x10,
- FD_GMEM_LOGICOP_ENABLED = 0x20,
- } gmem_reason;
- unsigned num_draws; /* number of draws in current batch */
-
/* Stats/counters:
*/
struct {
uint64_t batch_total, batch_sysmem, batch_gmem, batch_restore;
} stats;
- /* TODO get rid of this.. only used in gmem/tiling code paths (and
- * NULL the rest of the time). Just leaving for now to reduce some
- * churn..
- */
- struct fd_ringbuffer *ring;
-
/* Current batch.. the rule here is that you can deref ctx->batch
* in codepaths from pipe_context entrypoints. But not in code-
* paths from fd_batch_flush() (basically, the stuff that gets
* */
bool needs_rb_fbd;
- /* Keep track of DRAW initiators that need to be patched up depending
- * on whether we using binning or not:
- */
- struct util_dynarray draw_patches;
-
struct pipe_scissor_state scissor;
/* we don't have a disable/enable bit for scissor, so instead we keep
*/
struct pipe_scissor_state disabled_scissor;
- /* Track the maximal bounds of the scissor of all the draws within a
- * batch. Used at the tile rendering step (fd_gmem_render_tiles(),
- * mem2gmem/gmem2mem) to avoid needlessly moving data in/out of gmem.
- */
- struct pipe_scissor_state max_scissor;
-
- /* Track the cleared scissor for color/depth/stencil, so we know
- * which, if any, tiles need to be restored (mem2gmem). Only valid
- * if the corresponding bit in ctx->cleared is set.
- */
- struct {
- struct pipe_scissor_state color, depth, stencil;
- } cleared_scissor;
-
/* Current gmem/tiling configuration.. gets updated on render_tiles()
* if out of date with current maximal-scissor/cpp:
+ *
+ * (NOTE: this is kind of related to the batch, but moving it there
+ * means we'd always have to recalc tiles every batch)
*/
struct fd_gmem_stateobj gmem;
struct fd_vsc_pipe pipe[8];
struct pipe_blend_color blend_color;
struct pipe_stencil_ref stencil_ref;
unsigned sample_mask;
- struct pipe_framebuffer_state framebuffer;
struct pipe_poly_stipple stipple;
struct pipe_viewport_state viewport;
struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
struct pipe_debug_callback debug;
/* GMEM/tile handling fxns: */
- void (*emit_tile_init)(struct fd_context *ctx);
- void (*emit_tile_prep)(struct fd_context *ctx, struct fd_tile *tile);
- void (*emit_tile_mem2gmem)(struct fd_context *ctx, struct fd_tile *tile);
- void (*emit_tile_renderprep)(struct fd_context *ctx, struct fd_tile *tile);
- void (*emit_tile_gmem2mem)(struct fd_context *ctx, struct fd_tile *tile);
+ void (*emit_tile_init)(struct fd_batch *batch);
+ void (*emit_tile_prep)(struct fd_batch *batch, struct fd_tile *tile);
+ void (*emit_tile_mem2gmem)(struct fd_batch *batch, struct fd_tile *tile);
+ void (*emit_tile_renderprep)(struct fd_batch *batch, struct fd_tile *tile);
+ void (*emit_tile_gmem2mem)(struct fd_batch *batch, struct fd_tile *tile);
/* optional, for GMEM bypass: */
- void (*emit_sysmem_prep)(struct fd_context *ctx);
+ void (*emit_sysmem_prep)(struct fd_batch *batch);
/* draw: */
bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info);
#include "freedreno_util.h"
static void
-resource_read(struct fd_context *ctx, struct pipe_resource *prsc)
+resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
{
if (!prsc)
return;
- fd_batch_resource_used(ctx->batch, fd_resource(prsc), FD_PENDING_READ);
+ fd_batch_resource_used(batch, fd_resource(prsc), FD_PENDING_READ);
}
static void
-resource_written(struct fd_context *ctx, struct pipe_resource *prsc)
+resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
{
if (!prsc)
return;
- fd_batch_resource_used(ctx->batch, fd_resource(prsc), FD_PENDING_WRITE);
+ fd_batch_resource_used(batch, fd_resource(prsc), FD_PENDING_WRITE);
}
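/* Usage sketch: each draw/clear records every resource it touches
 * against its own batch rather than the context, e.g.:
 *
 *   resource_written(batch, pfb->cbufs[i]->texture);  // render target
 *   resource_read(batch, ctx->indexbuf.buffer);       // index buffer
 */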
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
{
struct fd_context *ctx = fd_context(pctx);
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_batch *batch = ctx->batch;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
unsigned i, prims, buffers = 0;
if (fd_depth_enabled(ctx)) {
buffers |= FD_BUFFER_DEPTH;
- resource_written(ctx, pfb->zsbuf->texture);
- ctx->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
+ resource_written(batch, pfb->zsbuf->texture);
+ batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
}
if (fd_stencil_enabled(ctx)) {
buffers |= FD_BUFFER_STENCIL;
- resource_written(ctx, pfb->zsbuf->texture);
- ctx->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
+ resource_written(batch, pfb->zsbuf->texture);
+ batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
}
if (fd_logicop_enabled(ctx))
- ctx->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;
+ batch->gmem_reason |= FD_GMEM_LOGICOP_ENABLED;
for (i = 0; i < pfb->nr_cbufs; i++) {
struct pipe_resource *surf;
surf = pfb->cbufs[i]->texture;
- resource_written(ctx, surf);
+ resource_written(batch, surf);
buffers |= PIPE_CLEAR_COLOR0 << i;
if (surf->nr_samples > 1)
- ctx->gmem_reason |= FD_GMEM_MSAA_ENABLED;
+ batch->gmem_reason |= FD_GMEM_MSAA_ENABLED;
if (fd_blend_enabled(ctx, i))
- ctx->gmem_reason |= FD_GMEM_BLEND_ENABLED;
+ batch->gmem_reason |= FD_GMEM_BLEND_ENABLED;
}
/* Skip over buffer 0, which is sent along with the command stream */
for (i = 1; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
- resource_read(ctx, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
- resource_read(ctx, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
+ resource_read(batch, ctx->constbuf[PIPE_SHADER_VERTEX].cb[i].buffer);
+ resource_read(batch, ctx->constbuf[PIPE_SHADER_FRAGMENT].cb[i].buffer);
}
/* Mark VBOs as being read */
for (i = 0; i < ctx->vtx.vertexbuf.count; i++) {
assert(!ctx->vtx.vertexbuf.vb[i].user_buffer);
- resource_read(ctx, ctx->vtx.vertexbuf.vb[i].buffer);
+ resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer);
}
/* Mark index buffer as being read */
- resource_read(ctx, ctx->indexbuf.buffer);
+ resource_read(batch, ctx->indexbuf.buffer);
/* Mark textures as being read */
for (i = 0; i < ctx->verttex.num_textures; i++)
if (ctx->verttex.textures[i])
- resource_read(ctx, ctx->verttex.textures[i]->texture);
+ resource_read(batch, ctx->verttex.textures[i]->texture);
for (i = 0; i < ctx->fragtex.num_textures; i++)
if (ctx->fragtex.textures[i])
- resource_read(ctx, ctx->fragtex.textures[i]->texture);
+ resource_read(batch, ctx->fragtex.textures[i]->texture);
/* Mark streamout buffers as being written.. */
for (i = 0; i < ctx->streamout.num_targets; i++)
if (ctx->streamout.targets[i])
- resource_written(ctx, ctx->streamout.targets[i]->buffer);
+ resource_written(batch, ctx->streamout.targets[i]->buffer);
- ctx->num_draws++;
+ batch->num_draws++;
prims = u_reduced_prims_for_vertices(info->mode, info->count);
ctx->stats.prims_generated += prims;
/* any buffers that haven't been cleared yet, we need to restore: */
- ctx->restore |= buffers & (FD_BUFFER_ALL & ~ctx->cleared);
+ batch->restore |= buffers & (FD_BUFFER_ALL & ~batch->cleared);
/* and any buffers used, need to be resolved: */
- ctx->resolve |= buffers;
+ batch->resolve |= buffers;
- DBG("%x num_draws=%u (%s/%s)", buffers, ctx->num_draws,
+ DBG("%x num_draws=%u (%s/%s)", buffers, batch->num_draws,
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
- fd_hw_query_set_stage(ctx, ctx->batch->draw, FD_STAGE_DRAW);
+ fd_hw_query_set_stage(ctx, batch->draw, FD_STAGE_DRAW);
if (ctx->draw_vbo(ctx, info))
- ctx->needs_flush = true;
+ batch->needs_flush = true;
for (i = 0; i < ctx->streamout.num_targets; i++)
ctx->streamout.offsets[i] += info->count;
if (fd_mesa_debug & FD_DBG_DDRAW)
ctx->dirty = 0xffffffff;
- fd_batch_check_size(ctx->batch);
+ fd_batch_check_size(batch);
}
/* TODO figure out how to make better use of existing state mechanism
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct fd_context *ctx = fd_context(pctx);
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct fd_batch *batch = ctx->batch;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct pipe_scissor_state *scissor = fd_context_get_scissor(ctx);
unsigned cleared_buffers;
int i;
* something like alpha-test causes side effects from the draw in
* the depth buffer, etc)
*/
- cleared_buffers = buffers & (FD_BUFFER_ALL & ~ctx->restore);
+ cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
/* do we have full-screen scissor? */
if (!memcmp(scissor, &ctx->disabled_scissor, sizeof(*scissor))) {
- ctx->cleared |= cleared_buffers;
+ batch->cleared |= cleared_buffers;
} else {
- ctx->partial_cleared |= cleared_buffers;
+ batch->partial_cleared |= cleared_buffers;
if (cleared_buffers & PIPE_CLEAR_COLOR)
- ctx->cleared_scissor.color = *scissor;
+ batch->cleared_scissor.color = *scissor;
if (cleared_buffers & PIPE_CLEAR_DEPTH)
- ctx->cleared_scissor.depth = *scissor;
+ batch->cleared_scissor.depth = *scissor;
if (cleared_buffers & PIPE_CLEAR_STENCIL)
- ctx->cleared_scissor.stencil = *scissor;
+ batch->cleared_scissor.stencil = *scissor;
}
- ctx->resolve |= buffers;
- ctx->needs_flush = true;
+ batch->resolve |= buffers;
+ batch->needs_flush = true;
if (buffers & PIPE_CLEAR_COLOR)
for (i = 0; i < pfb->nr_cbufs; i++)
if (buffers & (PIPE_CLEAR_COLOR0 << i))
- resource_written(ctx, pfb->cbufs[i]->texture);
+ resource_written(batch, pfb->cbufs[i]->texture);
if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
- resource_written(ctx, pfb->zsbuf->texture);
- ctx->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
+ resource_written(batch, pfb->zsbuf->texture);
+ batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
}
DBG("%x depth=%f, stencil=%u (%s/%s)", buffers, depth, stencil,
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
- fd_hw_query_set_stage(ctx, ctx->batch->draw, FD_STAGE_CLEAR);
+ fd_hw_query_set_stage(ctx, batch->draw, FD_STAGE_CLEAR);
ctx->clear(ctx, buffers, color, depth, stencil);
void fd_draw_init(struct pipe_context *pctx);
static inline void
-fd_draw(struct fd_context *ctx, struct fd_ringbuffer *ring,
+fd_draw(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
enum pc_di_src_sel src_sel, uint32_t count,
*/
emit_marker(ring, 7);
- if (is_a3xx_p0(ctx->screen)) {
+ if (is_a3xx_p0(batch->ctx->screen)) {
/* dummy-draw workaround: */
OUT_PKT3(ring, CP_DRAW_INDX, 3);
OUT_RING(ring, 0x00000000);
* we know if we are binning or not
*/
OUT_RINGP(ring, DRAW(primtype, src_sel, idx_type, 0, instances),
- &ctx->draw_patches);
+ &batch->draw_patches);
} else {
OUT_RING(ring, DRAW(primtype, src_sel, idx_type, vismode, instances));
}
emit_marker(ring, 7);
- fd_reset_wfi(ctx);
+ fd_reset_wfi(batch->ctx);
}
/* this is same for a2xx/a3xx, so split into helper: */
static inline void
-fd_draw_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
+fd_draw_emit(struct fd_batch *batch, struct fd_ringbuffer *ring,
enum pc_di_primtype primtype,
enum pc_di_vis_cull_mode vismode,
const struct pipe_draw_info *info)
{
- struct pipe_index_buffer *idx = &ctx->indexbuf;
struct pipe_resource *idx_buffer = NULL;
enum pc_di_index_size idx_type = INDEX_SIZE_IGN;
enum pc_di_src_sel src_sel;
uint32_t idx_size, idx_offset;
if (info->indexed) {
+ struct pipe_index_buffer *idx = &batch->ctx->indexbuf;
+
assert(!idx->user_buffer);
idx_buffer = idx->buffer;
src_sel = DI_SRC_SEL_AUTO_INDEX;
}
- fd_draw(ctx, ring, primtype, vismode, src_sel,
+ fd_draw(batch, ring, primtype, vismode, src_sel,
info->count, info->instance_count - 1,
idx_type, idx_size, idx_offset, idx_buffer);
}
* resolve.
*/
-static uint32_t bin_width(struct fd_context *ctx)
+static uint32_t bin_width(struct fd_screen *screen)
{
- if (is_a4xx(ctx->screen))
+ if (is_a4xx(screen))
return 1024;
- if (is_a3xx(ctx->screen))
+ if (is_a3xx(screen))
return 992;
return 512;
}
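/* Worked sizing example (hypothetical numbers): a 1920x1080 RGBA8888
 * target needs 1920*1080*4 bytes ~= 7.9 MiB, far more than any GMEM
 * here, so calculate_tiles() below divides the max_scissor region into
 * bins no wider than bin_width(), sized so that bin_w * bin_h * cpp
 * (plus depth/stencil, when resolved) fits in
 * ctx->screen->gmemsize_bytes.
 */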
}
static void
-calculate_tiles(struct fd_context *ctx)
+calculate_tiles(struct fd_batch *batch)
{
+ struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- struct pipe_scissor_state *scissor = &ctx->max_scissor;
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
+ struct pipe_scissor_state *scissor = &batch->max_scissor;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
uint32_t gmem_size = ctx->screen->gmemsize_bytes;
uint32_t minx, miny, width, height;
uint32_t nbins_x = 1, nbins_y = 1;
uint32_t bin_w, bin_h;
- uint32_t max_width = bin_width(ctx);
+ uint32_t max_width = bin_width(ctx->screen);
uint8_t cbuf_cpp[MAX_RENDER_TARGETS] = {0}, zsbuf_cpp[2] = {0};
uint32_t i, j, t, xoff, yoff;
uint32_t tpp_x, tpp_y;
- bool has_zs = !!(ctx->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL));
+ bool has_zs = !!(batch->resolve & (FD_BUFFER_DEPTH | FD_BUFFER_STENCIL));
int tile_n[ARRAY_SIZE(ctx->pipe)];
if (has_zs) {
}
static void
-render_tiles(struct fd_context *ctx)
+render_tiles(struct fd_batch *batch)
{
+ struct fd_context *ctx = batch->ctx;
struct fd_gmem_stateobj *gmem = &ctx->gmem;
int i;
- ctx->emit_tile_init(ctx);
+ ctx->emit_tile_init(batch);
- if (ctx->restore)
+ if (batch->restore)
ctx->stats.batch_restore++;
for (i = 0; i < (gmem->nbins_x * gmem->nbins_y); i++) {
DBG("bin_h=%d, yoff=%d, bin_w=%d, xoff=%d",
tile->bin_h, tile->yoff, tile->bin_w, tile->xoff);
- ctx->emit_tile_prep(ctx, tile);
+ ctx->emit_tile_prep(batch, tile);
- if (ctx->restore) {
- fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_MEM2GMEM);
- ctx->emit_tile_mem2gmem(ctx, tile);
- fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL);
+ if (batch->restore) {
+ fd_hw_query_set_stage(ctx, batch->gmem, FD_STAGE_MEM2GMEM);
+ ctx->emit_tile_mem2gmem(batch, tile);
+ fd_hw_query_set_stage(ctx, batch->gmem, FD_STAGE_NULL);
}
- ctx->emit_tile_renderprep(ctx, tile);
+ ctx->emit_tile_renderprep(batch, tile);
- fd_hw_query_prepare_tile(ctx, i, ctx->ring);
+ fd_hw_query_prepare_tile(ctx, i, batch->gmem);
/* emit IB to drawcmds: */
- ctx->emit_ib(ctx->ring, ctx->batch->draw);
+ ctx->emit_ib(batch->gmem, batch->draw);
fd_reset_wfi(ctx);
/* emit gmem2mem to transfer tile back to system memory: */
- fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_GMEM2MEM);
- ctx->emit_tile_gmem2mem(ctx, tile);
- fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL);
+ fd_hw_query_set_stage(ctx, batch->gmem, FD_STAGE_GMEM2MEM);
+ ctx->emit_tile_gmem2mem(batch, tile);
+ fd_hw_query_set_stage(ctx, batch->gmem, FD_STAGE_NULL);
}
}
static void
-render_sysmem(struct fd_context *ctx)
+render_sysmem(struct fd_batch *batch)
{
- ctx->emit_sysmem_prep(ctx);
+ struct fd_context *ctx = batch->ctx;
- fd_hw_query_prepare_tile(ctx, 0, ctx->ring);
+ ctx->emit_sysmem_prep(batch);
+
+ fd_hw_query_prepare_tile(ctx, 0, batch->gmem);
/* emit IB to drawcmds: */
- ctx->emit_ib(ctx->ring, ctx->batch->draw);
+ ctx->emit_ib(batch->gmem, batch->draw);
fd_reset_wfi(ctx);
}
void
-fd_gmem_render_tiles(struct fd_context *ctx)
+fd_gmem_render_tiles(struct fd_batch *batch)
{
- struct pipe_framebuffer_state *pfb = &ctx->framebuffer;
- struct fd_batch *batch = ctx->batch;
+ struct fd_context *ctx = batch->ctx;
+ struct pipe_framebuffer_state *pfb = &batch->framebuffer;
bool sysmem = false;
if (ctx->emit_sysmem_prep) {
- if (ctx->cleared || ctx->gmem_reason || (ctx->num_draws > 5)) {
+ if (batch->cleared || batch->gmem_reason || (batch->num_draws > 5)) {
DBG("GMEM: cleared=%x, gmem_reason=%x, num_draws=%u",
- ctx->cleared, ctx->gmem_reason, ctx->num_draws);
+ batch->cleared, batch->gmem_reason, batch->num_draws);
} else if (!(fd_mesa_debug & FD_DBG_NOBYPASS)) {
sysmem = true;
}
ctx->stats.batch_total++;
- ctx->ring = batch->gmem;
-
if (sysmem) {
DBG("rendering sysmem (%s/%s)",
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
fd_hw_query_prepare(ctx, 1);
- render_sysmem(ctx);
+ render_sysmem(batch);
ctx->stats.batch_sysmem++;
} else {
struct fd_gmem_stateobj *gmem = &ctx->gmem;
- calculate_tiles(ctx);
+ calculate_tiles(batch);
DBG("rendering %dx%d tiles (%s/%s)", gmem->nbins_x, gmem->nbins_y,
util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
util_format_short_name(pipe_surface_format(pfb->zsbuf)));
fd_hw_query_prepare(ctx, gmem->nbins_x * gmem->nbins_y);
- render_tiles(ctx);
+ render_tiles(batch);
ctx->stats.batch_gmem++;
}
fd_ringbuffer_flush(batch->gmem);
- ctx->ring = NULL;
-
fd_reset_wfi(ctx);
- /* reset maximal bounds: */
- ctx->max_scissor.minx = ctx->max_scissor.miny = ~0;
- ctx->max_scissor.maxx = ctx->max_scissor.maxy = 0;
-
ctx->dirty = ~0;
}
* case would be a single clear.
*/
bool
-fd_gmem_needs_restore(struct fd_context *ctx, struct fd_tile *tile,
+fd_gmem_needs_restore(struct fd_batch *batch, struct fd_tile *tile,
uint32_t buffers)
{
- if (!(ctx->restore & buffers))
+ if (!(batch->restore & buffers))
return false;
/* if buffers partially cleared, then slow-path to figure out
* if this particular tile needs restoring:
*/
if ((buffers & FD_BUFFER_COLOR) &&
- (ctx->partial_cleared & FD_BUFFER_COLOR) &&
- skip_restore(&ctx->cleared_scissor.color, tile))
+ (batch->partial_cleared & FD_BUFFER_COLOR) &&
+ skip_restore(&batch->cleared_scissor.color, tile))
return false;
if ((buffers & FD_BUFFER_DEPTH) &&
- (ctx->partial_cleared & FD_BUFFER_DEPTH) &&
- skip_restore(&ctx->cleared_scissor.depth, tile))
+ (batch->partial_cleared & FD_BUFFER_DEPTH) &&
+ skip_restore(&batch->cleared_scissor.depth, tile))
return false;
if ((buffers & FD_BUFFER_STENCIL) &&
- (ctx->partial_cleared & FD_BUFFER_STENCIL) &&
- skip_restore(&ctx->cleared_scissor.stencil, tile))
+ (batch->partial_cleared & FD_BUFFER_STENCIL) &&
+ skip_restore(&batch->cleared_scissor.stencil, tile))
return false;
return true;
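/* skip_restore() (not visible in this hunk) just needs to answer
 * "is this tile entirely inside the cleared scissor?"; a sketch:
 *
 *   static bool
 *   skip_restore(struct pipe_scissor_state *scissor, struct fd_tile *tile)
 *   {
 *           unsigned minx = tile->xoff;
 *           unsigned maxx = tile->xoff + tile->bin_w;
 *           unsigned miny = tile->yoff;
 *           unsigned maxy = tile->yoff + tile->bin_h;
 *           return (minx >= scissor->minx) && (maxx <= scissor->maxx) &&
 *                  (miny >= scissor->miny) && (maxy <= scissor->maxy);
 *   }
 */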
uint16_t width, height;
};
-struct fd_context;
+struct fd_batch;
-void fd_gmem_render_tiles(struct fd_context *ctx);
+void fd_gmem_render_tiles(struct fd_batch *batch);
-bool fd_gmem_needs_restore(struct fd_context *ctx, struct fd_tile *tile,
+bool fd_gmem_needs_restore(struct fd_batch *batch, struct fd_tile *tile,
uint32_t buffers);
#endif /* FREEDRENO_GMEM_H_ */
if (!ctx->sample_cache[idx]) {
ctx->sample_cache[idx] =
ctx->sample_providers[idx]->get_sample(ctx, ring);
- ctx->needs_flush = true;
+ ctx->batch->needs_flush = true;
}
fd_hw_sample_reference(ctx, &samp, ctx->sample_cache[idx]);
/* if app didn't actually trigger any cmdstream, then
* we have nothing to do:
*/
- if (!ctx->needs_flush)
+ if (!ctx->batch->needs_flush)
return true;
DBG("reading query result forces flush!");
fd_context_render(&ctx->base);
util_blitter_save_depth_stencil_alpha(ctx->blitter, ctx->zsa);
util_blitter_save_stencil_ref(ctx->blitter, &ctx->stencil_ref);
util_blitter_save_sample_mask(ctx->blitter, ctx->sample_mask);
- util_blitter_save_framebuffer(ctx->blitter, &ctx->framebuffer);
+ util_blitter_save_framebuffer(ctx->blitter,
+ ctx->batch ? &ctx->batch->framebuffer : NULL);
util_blitter_save_fragment_sampler_states(ctx->blitter,
ctx->fragtex.num_samplers,
(void **)ctx->fragtex.samplers);
const struct pipe_framebuffer_state *framebuffer)
{
struct fd_context *ctx = fd_context(pctx);
- struct pipe_framebuffer_state *cso = &ctx->framebuffer;
+ struct pipe_framebuffer_state *cso;
- DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->needs_flush,
+ DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
framebuffer->cbufs[0], framebuffer->zsbuf);
fd_context_render(pctx);
+ cso = &ctx->batch->framebuffer;
+
if ((cso->width != framebuffer->width) ||
(cso->height != framebuffer->height))
ctx->needs_rb_fbd = true;