static void
fd2_context_destroy(struct pipe_context *pctx)
+ in_dt
{
fd_context_destroy(pctx);
free(pctx);
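For reference, the in_dt / assert_dt / dt / disable_thread_safety_analysis annotations added throughout this change presumably map onto clang's -Wthread-safety capability attributes; the real expansions live in freedreno_util.h. Below is only a minimal sketch of how such macros could be defined, using a made-up fd_driver_thread capability that stands for "currently executing in the driver thread":

/* Illustrative only; not the actual freedreno_util.h definitions. */
#if defined(__clang__)
struct __attribute__((capability("mutex"))) fd_fake_cap { int dummy; };
extern struct fd_fake_cap fd_driver_thread;

/* entry points that run in the driver thread assert the capability: */
#define in_dt      __attribute__((assert_capability(fd_driver_thread)))
/* internal helpers require that their caller already holds it: */
#define assert_dt  __attribute__((requires_capability(fd_driver_thread)))
/* context state that may only be touched from the driver thread: */
#define dt         __attribute__((guarded_by(fd_driver_thread)))
/* setup/teardown paths the analysis should simply ignore: */
#define disable_thread_safety_analysis \
   __attribute__((no_thread_safety_analysis))
#else
#define in_dt
#define assert_dt
#define dt
#define disable_thread_safety_analysis
#endif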
static void
emit_vertexbufs(struct fd_context *ctx)
+ assert_dt
{
struct fd_vertex_stateobj *vtx = ctx->vtx.vtx;
struct fd_vertexbuf_stateobj *vertexbuf = &ctx->vtx.vertexbuf;
static void
draw_impl(struct fd_context *ctx, const struct pipe_draw_info *info,
- const struct pipe_draw_start_count *draw,
- struct fd_ringbuffer *ring, unsigned index_offset, bool binning)
+ const struct pipe_draw_start_count *draw,
+ struct fd_ringbuffer *ring, unsigned index_offset, bool binning)
+ assert_dt
{
OUT_PKT3(ring, CP_SET_CONSTANT, 2);
OUT_RING(ring, CP_REG(REG_A2XX_VGT_INDX_OFFSET));
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *pdraw,
unsigned index_offset)
+ assert_dt
{
if (!ctx->prog.fs || !ctx->prog.vs)
return false;
static void
clear_state(struct fd_batch *batch, struct fd_ringbuffer *ring,
- unsigned buffers, bool fast_clear)
+ unsigned buffers, bool fast_clear)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
static bool
fd2_clear_fast(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
+ assert_dt
{
/* using 4x MSAA allows clearing ~2x faster
* then we can use higher bpp clearing to clear lower bpp
static bool
fd2_clear(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
+ assert_dt
{
struct fd_ringbuffer *ring = ctx->batch->draw;
struct pipe_framebuffer_state *fb = &ctx->batch->framebuffer;
void
fd2_draw_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd2_draw_vbo;
void fd2_emit_vertex_bufs(struct fd_ringbuffer *ring, uint32_t val,
struct fd2_vertex_buf *vbufs, uint32_t n);
-void fd2_emit_state_binning(struct fd_context *ctx, const enum fd_dirty_3d_state dirty);
-void fd2_emit_state(struct fd_context *ctx, const enum fd_dirty_3d_state dirty);
+void fd2_emit_state_binning(struct fd_context *ctx, const enum fd_dirty_3d_state dirty) assert_dt;
+void fd2_emit_state(struct fd_context *ctx, const enum fd_dirty_3d_state dirty) assert_dt;
void fd2_emit_restore(struct fd_context *ctx, struct fd_ringbuffer *ring);
void fd2_emit_init_screen(struct pipe_screen *pscreen);
static void
prepare_tile_fini_ib(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
static void
fd2_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
/* before first tile */
static void
fd2_emit_tile_init(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
/* before IB to rendering cmds: */
static void
fd2_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd2_context *fd2_ctx = fd2_context(ctx);
void
fd2_gmem_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
static void
patch_vtx_fetch(struct fd_context *ctx, struct pipe_vertex_element *elem,
- instr_fetch_vtx_t *instr, uint16_t dst_swiz)
+ instr_fetch_vtx_t *instr, uint16_t dst_swiz)
+ assert_dt
{
struct surface_format fmt = fd2_pipe2surface(elem->src_format);
static void
patch_fetches(struct fd_context *ctx, struct ir2_shader_info *info,
- struct fd_vertex_stateobj *vtx, struct fd_texture_stateobj *tex)
+ struct fd_vertex_stateobj *vtx, struct fd_texture_stateobj *tex)
+ assert_dt
{
for (int i = 0; i < info->num_fetch_instrs; i++) {
struct ir2_fetch_info *fi = &info->fetch_info[i];
};
void fd2_program_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd_program_stateobj *prog);
+ struct fd_program_stateobj *prog) assert_dt;
void fd2_prog_init(struct pipe_context *pctx);
static void
perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
static void
perfcntr_pause(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
void
fd2_query_context_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
fd2_sampler_states_bind(struct pipe_context *pctx,
enum pipe_shader_type shader, unsigned start,
unsigned nr, void **hwcso)
+ in_dt
{
if (!hwcso)
nr = 0;
fd2_set_sampler_views(struct pipe_context *pctx, enum pipe_shader_type shader,
unsigned start, unsigned nr, unsigned unbind_num_trailing_slots,
struct pipe_sampler_view **views)
+ in_dt
{
if (shader == PIPE_SHADER_FRAGMENT) {
struct fd_context *ctx = fd_context(pctx);
unsigned
fd2_get_const_idx(struct fd_context *ctx, struct fd_texture_stateobj *tex,
unsigned samp_id)
+ assert_dt
{
if (tex == &ctx->tex[PIPE_SHADER_FRAGMENT])
return samp_id;
static void
fd3_context_destroy(struct pipe_context *pctx)
+ in_dt
{
struct fd3_context *fd3_ctx = fd3_context(fd_context(pctx));
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd3_emit *emit, unsigned index_offset)
+ assert_dt
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
+ assert_dt
{
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct ir3_shader_key *last_key = &fd3_ctx->last_key;
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset)
+ in_dt
{
struct fd3_context *fd3_ctx = fd3_context(ctx);
struct fd3_emit emit = {
void
fd3_draw_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd3_draw_vbo;
return emit->fs;
}
-void fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit);
+void fd3_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd3_emit *emit) assert_dt;
void fd3_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd3_emit *emit);
+ struct fd3_emit *emit) assert_dt;
-void fd3_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);
+void fd3_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
void fd3_emit_init_screen(struct pipe_screen *pscreen);
void fd3_emit_init(struct pipe_context *pctx);
static inline void
fd3_emit_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
+ assert_dt
{
fd_wfi(batch, ring);
OUT_PKT0(ring, REG_A3XX_UCHE_CACHE_INVALIDATE0_REG, 2);
static void update_vsc_pipe(struct fd_batch *batch);
static void
emit_binning_workaround(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
static void
fd3_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
static void
fd3_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
/* for rendering directly to system memory: */
static void
fd3_emit_sysmem_prep(struct fd_batch *batch)
+ assert_dt
{
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring = batch->gmem;
static void
update_vsc_pipe(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
static void
emit_binning_pass(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
/* before first tile */
static void
fd3_emit_tile_init(struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
/* before IB to rendering cmds: */
static void
fd3_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd3_context *fd3_ctx = fd3_context(ctx);
void
fd3_gmem_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
};
void fd3_query_context_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
static void
fd4_context_destroy(struct pipe_context *pctx)
+ in_dt
{
struct fd4_context *fd4_ctx = fd4_context(fd_context(pctx));
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd4_emit *emit, unsigned index_offset)
+ assert_dt
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
+ assert_dt
{
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct ir3_shader_key *last_key = &fd4_ctx->last_key;
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset)
+ in_dt
{
struct fd4_context *fd4_ctx = fd4_context(ctx);
struct fd4_emit emit = {
void
fd4_draw_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd4_draw_vbo;
return emit->fs;
}
-void fd4_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd4_emit *emit);
+void fd4_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd4_emit *emit) assert_dt;
void fd4_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd4_emit *emit);
+ struct fd4_emit *emit) assert_dt;
-void fd4_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);
+void fd4_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
void fd4_emit_init_screen(struct pipe_screen *pscreen);
void fd4_emit_init(struct pipe_context *pctx);
static void
fd4_emit_tile_gmem2mem(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
static void
fd4_emit_tile_mem2gmem(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
/* for rendering directly to system memory: */
static void
fd4_emit_sysmem_prep(struct fd_batch *batch)
+ assert_dt
{
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
struct fd_ringbuffer *ring = batch->gmem;
static void
update_vsc_pipe(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
static void
emit_binning_pass(struct fd_batch *batch)
+ assert_dt
{
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
/* before first tile */
static void
fd4_emit_tile_init(struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
/* before IB to rendering cmds: */
static void
fd4_emit_tile_renderprep(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd4_context *fd4_ctx = fd4_context(ctx);
void
fd4_gmem_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
static void
time_elapsed_enable(struct fd_context *ctx, struct fd_ringbuffer *ring)
+ assert_dt
{
/* Right now, the assignment of countable to counter register is
* just hard coded. If we start exposing more countables than we
static struct fd_hw_sample *
time_elapsed_get_sample(struct fd_batch *batch, struct fd_ringbuffer *ring)
+ assert_dt
{
struct fd_hw_sample *samp = fd_hw_sample_init(batch, sizeof(uint64_t));
};
void fd4_query_context_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
bool
fd5_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
+ assert_dt
{
struct fd_batch *batch;
static void
fd5_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
+ assert_dt
{
struct ir3_shader_key key = {};
struct ir3_shader_variant *v;
void
fd5_compute_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->launch_grid = fd5_launch_grid;
static void
fd5_context_destroy(struct pipe_context *pctx)
+ in_dt
{
struct fd5_context *fd5_ctx = fd5_context(fd_context(pctx));
struct pipe_context *
fd5_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
+ disable_thread_safety_analysis
{
struct fd_screen *screen = fd_screen(pscreen);
struct fd5_context *fd5_ctx = CALLOC_STRUCT(fd5_context);
static void
draw_impl(struct fd_context *ctx, struct fd_ringbuffer *ring,
struct fd5_emit *emit, unsigned index_offset)
+ assert_dt
{
const struct pipe_draw_info *info = emit->info;
enum pc_di_primtype primtype = ctx->primtypes[info->mode];
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
+ assert_dt
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct ir3_shader_key *last_key = &fd5_ctx->last_key;
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset)
+ in_dt
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct fd5_emit emit = {
static bool
fd5_clear(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
+ assert_dt
{
struct fd_ringbuffer *ring = ctx->batch->draw;
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
void
fd5_draw_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd5_draw_vbo;
static void
emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
+ assert_dt
{
struct fd5_context *fd5_ctx = fd5_context(ctx);
struct bcolor_entry *entries;
static bool
emit_textures(struct fd_context *ctx, struct fd_ringbuffer *ring,
enum a4xx_state_block sb, struct fd_texture_stateobj *tex)
+ assert_dt
{
bool needs_border = false;
unsigned bcolor_offset = (sb == SB4_FS_TEX) ? ctx->tex[PIPE_SHADER_VERTEX].num_samplers : 0;
static inline void
fd5_cache_flush(struct fd_batch *batch, struct fd_ringbuffer *ring)
+ assert_dt
{
fd_reset_wfi(batch);
OUT_PKT4(ring, REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO, 5);
static inline void
fd5_emit_render_cntl(struct fd_context *ctx, bool blit, bool binning)
+ assert_dt
{
struct fd_ringbuffer *ring = binning ? ctx->batch->binning : ctx->batch->draw;
OUT_RING(ring, 0x0);
}
-void fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit);
+void fd5_emit_vertex_bufs(struct fd_ringbuffer *ring, struct fd5_emit *emit) assert_dt;
void fd5_emit_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct fd5_emit *emit);
+ struct fd5_emit *emit) assert_dt;
void fd5_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct ir3_shader_variant *cp);
+ struct ir3_shader_variant *cp) assert_dt;
void fd5_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
- struct fd_context *ctx, const struct pipe_grid_info *info);
+ struct fd_context *ctx, const struct pipe_grid_info *info) assert_dt;
-void fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);
+void fd5_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
void fd5_emit_init_screen(struct pipe_screen *pscreen);
void fd5_emit_init(struct pipe_context *pctx);
static void
update_vsc_pipe(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd5_context *fd5_ctx = fd5_context(ctx);
static void
emit_binning_pass(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_ringbuffer *ring = batch->gmem;
/* before first tile */
static void
fd5_emit_tile_init(struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
/* before mem2gmem */
static void
fd5_emit_tile_prep(struct fd_batch *batch, const struct fd_tile *tile)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
static void
fd5_emit_tile_fini(struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
static void
fd5_emit_sysmem_prep(struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
void
fd5_gmem_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
static void
timestamp_resume(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
static void
timestamp_pause(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
static void
perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
static void
perfcntr_pause(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
void
fd5_query_context_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
static void
fd6_clear_ubwc(struct fd_batch *batch, struct fd_resource *rsc)
+ assert_dt
{
struct fd_ringbuffer *ring = fd_batch_get_prologue(batch);
union pipe_color_union color = {};
static bool
handle_rgba_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
+ assert_dt
{
struct fd_batch *batch;
*/
static bool
do_rewritten_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
+ assert_dt
{
bool success = handle_rgba_blit(ctx, info);
if (!success)
*/
static bool
handle_zs_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
+ assert_dt
{
struct pipe_blit_info blit = *info;
static bool
handle_compressed_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
+ assert_dt
{
struct pipe_blit_info blit = *info;
static bool
fd6_blit(struct fd_context *ctx, const struct pipe_blit_info *info)
+ assert_dt
{
if (info->mask & PIPE_MASK_ZS)
return handle_zs_blit(ctx, info);
void
fd6_blitter_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
fd_context(pctx)->clear_ubwc = fd6_clear_ubwc;
void fd6_clear_surface(struct fd_context *ctx,
struct fd_ringbuffer *ring, struct pipe_surface *psurf,
- uint32_t width, uint32_t height, union pipe_color_union *color);
+ uint32_t width, uint32_t height, union pipe_color_union *color) assert_dt;
void fd6_resolve_tile(struct fd_batch *batch, struct fd_ringbuffer *ring,
- uint32_t base, struct pipe_surface *psurf);
+ uint32_t base, struct pipe_surface *psurf) assert_dt;
#endif /* FD6_BLIT_H_ */
/* maybe move to fd6_program? */
static void
cs_program_emit(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct ir3_shader_variant *v)
+ struct ir3_shader_variant *v) assert_dt
{
const struct ir3_info *i = &v->info;
enum a3xx_threadsize thrsz = FOUR_QUADS;
static void
fd6_launch_grid(struct fd_context *ctx, const struct pipe_grid_info *info)
+ in_dt
{
struct ir3_shader_key key = {};
struct ir3_shader_variant *v;
void
fd6_compute_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->launch_grid = fd6_launch_grid;
static void
emit_tess_bos(struct fd_ringbuffer *ring, struct fd6_emit *emit, struct ir3_shader_variant *s)
+ assert_dt
{
struct fd_context *ctx = emit->ctx;
const struct ir3_const_state *const_state = ir3_const_state(s);
static void
emit_tess_consts(struct fd6_emit *emit)
+ assert_dt
{
struct fd_context *ctx = emit->ctx;
static void
emit_user_consts(struct fd6_emit *emit)
+ assert_dt
{
static const enum pipe_shader_type types[] = {
PIPE_SHADER_VERTEX, PIPE_SHADER_TESS_CTRL, PIPE_SHADER_TESS_EVAL,
#include "fd6_emit.h"
-void fd6_emit_consts(struct fd6_emit *emit);
+void fd6_emit_consts(struct fd6_emit *emit) assert_dt;
void fd6_emit_ibo_consts(struct fd6_emit *emit, const struct ir3_shader_variant *v,
- enum pipe_shader_type stage, struct fd_ringbuffer *ring);
+ enum pipe_shader_type stage, struct fd_ringbuffer *ring) assert_dt;
void fd6_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
- struct fd_context *ctx, const struct pipe_grid_info *info);
+ struct fd_context *ctx, const struct pipe_grid_info *info) assert_dt;
void fd6_emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
- struct fd_ringbuffer *ring);
+ struct fd_ringbuffer *ring) assert_dt;
void fd6_emit_link_map(struct fd_screen *screen,
const struct ir3_shader_variant *producer,
- const struct ir3_shader_variant *v, struct fd_ringbuffer *ring);
+ const struct ir3_shader_variant *v, struct fd_ringbuffer *ring) assert_dt;
#endif /* FD6_CONST_H */
static void
fd6_context_destroy(struct pipe_context *pctx)
+ in_dt
{
struct fd6_context *fd6_ctx = fd6_context(fd_context(pctx));
struct pipe_context *
fd6_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
+ disable_thread_safety_analysis
{
struct fd_screen *screen = fd_screen(pscreen);
struct fd6_context *fd6_ctx = CALLOC_STRUCT(fd6_context);
*/
static void
fixup_shader_state(struct fd_context *ctx, struct ir3_shader_key *key)
+ assert_dt
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct ir3_shader_key *last_key = &fd6_ctx->last_key;
static void
fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit)
+ assert_dt
{
if (ctx->last.dirty ||
(ctx->last.primitive_restart != emit->primitive_restart)) {
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw,
unsigned index_offset)
+ assert_dt
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct shader_info *gs_info = ir3_get_shader_info(ctx->prog.gs);
static bool
fd6_clear(struct fd_context *ctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
+ assert_dt
{
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
const bool has_depth = pfb->zsbuf;
void
fd6_draw_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->draw_vbo = fd6_draw_vbo;
static void
emit_border_color(struct fd_context *ctx, struct fd_ringbuffer *ring)
+ assert_dt
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct bcolor_entry *entries;
static void
fd6_emit_fb_tex(struct fd_ringbuffer *state, struct fd_context *ctx)
+ assert_dt
{
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
struct pipe_surface *psurf = pfb->cbufs[0];
static bool
fd6_emit_combined_textures(struct fd_ringbuffer *ring, struct fd6_emit *emit,
enum pipe_shader_type type, const struct ir3_shader_variant *v)
+ assert_dt
{
struct fd_context *ctx = emit->ctx;
bool needs_border = false;
static struct fd_ringbuffer *
build_vbo_state(struct fd6_emit *emit)
+ assert_dt
{
const struct fd_vertex_state *vtx = emit->vtx;
static enum a6xx_ztest_mode
compute_ztest_mode(struct fd6_emit *emit, bool lrz_valid)
+ assert_dt
{
struct fd_context *ctx = emit->ctx;
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
*/
static struct fd6_lrz_state
compute_lrz_state(struct fd6_emit *emit, bool binning_pass)
+ assert_dt
{
struct fd_context *ctx = emit->ctx;
struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
static struct fd_ringbuffer *
build_lrz(struct fd6_emit *emit, bool binning_pass)
+ assert_dt
{
struct fd_context *ctx = emit->ctx;
struct fd6_context *fd6_ctx = fd6_context(ctx);
}
static void
-fd6_emit_streamout(struct fd_ringbuffer *ring, struct fd6_emit *emit, struct ir3_stream_output_info *info)
+fd6_emit_streamout(struct fd_ringbuffer *ring, struct fd6_emit *emit,
+ struct ir3_stream_output_info *info)
+ assert_dt
{
struct fd_context *ctx = emit->ctx;
const struct fd6_program_state *prog = fd6_emit_get_prog(emit);
*/
static void
fd6_framebuffer_barrier(struct fd_context *ctx)
+ assert_dt
{
struct fd6_context *fd6_ctx = fd6_context(ctx);
struct fd_batch *batch = ctx->batch;
void
fd6_emit_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
ctx->framebuffer_barrier = fd6_framebuffer_barrier;
bool fd6_emit_textures(struct fd_pipe *pipe, struct fd_ringbuffer *ring,
enum pipe_shader_type type, struct fd_texture_stateobj *tex,
unsigned bcolor_offset,
- const struct ir3_shader_variant *v, struct fd_context *ctx);
+ const struct ir3_shader_variant *v, struct fd_context *ctx) assert_dt;
-void fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit);
+void fd6_emit_state(struct fd_ringbuffer *ring, struct fd6_emit *emit) assert_dt;
void fd6_emit_cs_state(struct fd_context *ctx, struct fd_ringbuffer *ring,
- struct ir3_shader_variant *cp);
+ struct ir3_shader_variant *cp) assert_dt;
void fd6_emit_restore(struct fd_batch *batch, struct fd_ringbuffer *ring);
static void
emit_binning_pass(struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
/* before first tile */
static void
fd6_emit_tile_init(struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
uint32_t base,
struct pipe_surface *psurf,
unsigned buffer)
+ assert_dt
{
uint32_t info = 0;
bool stencil = false;
static void
prepare_tile_fini_ib(struct fd_batch *batch)
+ assert_dt
{
const struct fd_gmem_stateobj *gmem = batch->gmem_state;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
static void
emit_sysmem_clears(struct fd_batch *batch, struct fd_ringbuffer *ring)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
static void
fd6_emit_sysmem_prep(struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->gmem;
struct fd_screen *screen = batch->ctx->screen;
void
fd6_gmem_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
#include "freedreno_context.h"
-void fd6_emit_image_tex(struct fd_ringbuffer *ring, const struct pipe_image_view *pimg);
-void fd6_emit_ssbo_tex(struct fd_ringbuffer *ring, const struct pipe_shader_buffer *pbuf);
+void fd6_emit_image_tex(struct fd_ringbuffer *ring, const struct pipe_image_view *pimg) assert_dt;
+void fd6_emit_ssbo_tex(struct fd_ringbuffer *ring, const struct pipe_shader_buffer *pbuf) assert_dt;
struct ir3_shader_variant;
struct fd_ringbuffer * fd6_build_ibo_state(struct fd_context *ctx,
- const struct ir3_shader_variant *v, enum pipe_shader_type shader);
+ const struct ir3_shader_variant *v, enum pipe_shader_type shader) assert_dt;
void fd6_image_init(struct pipe_context *pctx);
setup_stateobj(struct fd_ringbuffer *ring, struct fd_context *ctx,
struct fd6_program_state *state, const struct ir3_shader_key *key,
bool binning_pass)
+ assert_dt
{
uint32_t pos_regid, psize_regid, color_regid[8], posz_regid;
uint32_t clip0_regid, clip1_regid;
struct ir3_shader_variant *gs,
struct ir3_shader_variant *fs,
const struct ir3_shader_key *key)
+ in_dt
{
- struct fd_context *ctx = data;
+ struct fd_context *ctx = fd_context(data);
struct fd6_program_state *state = CALLOC_STRUCT(fd6_program_state);
/* if we have streamout, use full VS in binning pass, as the
}
void fd6_emit_shader(struct fd_context *ctx, struct fd_ringbuffer *ring,
- const struct ir3_shader_variant *so);
+ const struct ir3_shader_variant *so) assert_dt;
-struct fd_ringbuffer * fd6_program_interp_state(struct fd6_emit *emit);
+struct fd_ringbuffer * fd6_program_interp_state(struct fd6_emit *emit) assert_dt;
void fd6_prog_init(struct pipe_context *pctx);
static void
occlusion_pause(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
static void
time_elapsed_pause(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
static void
primitives_generated_resume(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
static void
primitives_generated_pause(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
static void
primitives_emitted_resume(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
static void
primitives_emitted_pause(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_ringbuffer *ring = batch->draw;
static void
perfcntr_resume(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
static void
perfcntr_pause(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
struct fd_batch_query_data *data = aq->query_data;
struct fd_screen *screen = data->screen;
void
fd6_query_context_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
static inline struct fd_ringbuffer *
fd6_rasterizer_state(struct fd_context *ctx, bool primitive_restart)
+ assert_dt
{
struct fd6_rasterizer_stateobj *rasterizer = fd6_rasterizer_stateobj(ctx->rasterizer);
unsigned variant = primitive_restart;
void
fd6_validate_format(struct fd_context *ctx, struct fd_resource *rsc,
enum pipe_format format)
+ in_dt /* TODO this will be re-worked with threaded-ctx; this is just temporary */
{
if (!rsc->layout.ubwc)
return;
static void
fd6_rebind_resource(struct fd_context *ctx, struct fd_resource *rsc)
+ assert_dt
{
fd_screen_assert_locked(ctx->screen);
void
fd6_texture_init(struct pipe_context *pctx)
+ disable_thread_safety_analysis
{
struct fd_context *ctx = fd_context(pctx);
struct fd6_context *fd6_ctx = fd6_context(ctx);
static inline unsigned
fd6_border_color_offset(struct fd_context *ctx, enum pipe_shader_type type,
struct fd_texture_stateobj *tex)
+ assert_dt
{
/* Currently we put the FS border-color state after VS. Possibly
* we could swap the order.
};
struct fd6_texture_state * fd6_texture_state(struct fd_context *ctx,
- enum pipe_shader_type type, struct fd_texture_stateobj *tex);
+ enum pipe_shader_type type, struct fd_texture_stateobj *tex) assert_dt;
/* not called directly: */
void __fd6_texture_state_describe(char* buf, const struct fd6_texture_state *tex);
static inline struct fd_ringbuffer *
fd6_zsa_state(struct fd_context *ctx, bool no_alpha, bool depth_clamp)
+ assert_dt
{
int variant = 0;
if (no_alpha)
static void
batch_flush_dependencies(struct fd_batch *batch)
+ assert_dt
{
struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
struct fd_batch *dep;
static void
batch_reset_resources(struct fd_batch *batch)
+ assert_dt
{
fd_screen_lock(batch->ctx->screen);
batch_reset_resources_locked(batch);
static void
batch_reset(struct fd_batch *batch)
+ assert_dt
{
DBG("%p", batch);
/* Only called from fd_batch_flush() */
static void
batch_flush(struct fd_batch *batch)
+ assert_dt
{
DBG("%p: needs_flush=%d", batch, batch->needs_flush);
static void
flush_write_batch(struct fd_resource *rsc)
+ assert_dt
{
struct fd_batch *b = NULL;
fd_batch_reference_locked(&b, rsc->write_batch);
struct fd_batch * fd_batch_create(struct fd_context *ctx, bool nondraw);
-void fd_batch_reset(struct fd_batch *batch);
-void fd_batch_flush(struct fd_batch *batch);
-void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep);
-void fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc);
-void fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc);
-void fd_batch_check_size(struct fd_batch *batch);
+void fd_batch_reset(struct fd_batch *batch) assert_dt;
+void fd_batch_flush(struct fd_batch *batch) assert_dt;
+void fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep) assert_dt;
+void fd_batch_resource_write(struct fd_batch *batch, struct fd_resource *rsc) assert_dt;
+void fd_batch_resource_read_slowpath(struct fd_batch *batch, struct fd_resource *rsc) assert_dt;
+void fd_batch_check_size(struct fd_batch *batch) assert_dt;
/* not called directly: */
-void __fd_batch_describe(char* buf, const struct fd_batch *batch);
+void __fd_batch_describe(char* buf, const struct fd_batch *batch) assert_dt;
void __fd_batch_destroy(struct fd_batch *batch);
/*
* the batch before each draw.
*/
static inline void fd_batch_update_queries(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
}
static inline void fd_batch_finish_queries(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
batch->needs_wfi = true;
}
-void fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring);
+void fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
/* emit a CP_EVENT_WRITE:
*/
static void
bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx, bool deferred)
+ assert_dt
{
/* fd_batch_flush() (and fd_batch_add_dep() which calls it indirectly)
* can cause batches to be unref'd and freed under our feet, so grab
static struct fd_batch *
alloc_batch_locked(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw)
+ assert_dt
{
struct fd_batch *batch;
uint32_t idx;
static struct fd_batch *
batch_from_key(struct fd_batch_cache *cache, struct fd_batch_key *key,
struct fd_context *ctx)
+ assert_dt
{
struct fd_batch *batch = NULL;
uint32_t hash = key_hash(key);
#include "pipe/p_state.h"
+#include "freedreno_util.h"
+
struct fd_resource;
struct fd_batch;
struct fd_context;
void fd_bc_init(struct fd_batch_cache *cache);
void fd_bc_fini(struct fd_batch_cache *cache);
-void fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx);
-void fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx);
+void fd_bc_flush(struct fd_batch_cache *cache, struct fd_context *ctx) assert_dt;
+void fd_bc_flush_deferred(struct fd_batch_cache *cache, struct fd_context *ctx) assert_dt;
void fd_bc_dump(struct fd_screen *screen, const char *fmt, ...) _util_printf_format(2, 3);
void fd_bc_invalidate_context(struct fd_context *ctx);
void fd_bc_invalidate_batch(struct fd_batch *batch, bool destroy);
void fd_bc_invalidate_resource(struct fd_resource *rsc, bool destroy);
-struct fd_batch * fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw);
+struct fd_batch * fd_bc_alloc_batch(struct fd_batch_cache *cache, struct fd_context *ctx, bool nondraw) assert_dt;
struct fd_batch * fd_batch_from_fb(struct fd_batch_cache *cache,
- struct fd_context *ctx, const struct pipe_framebuffer_state *pfb);
+ struct fd_context *ctx, const struct pipe_framebuffer_state *pfb) assert_dt;
#endif /* FREEDRENO_BATCH_CACHE_H_ */
static void
fd_blitter_pipe_begin(struct fd_context *ctx, bool render_cond, bool discard)
+ assert_dt
{
fd_fence_ref(&ctx->last_fence, NULL);
static void
fd_blitter_pipe_end(struct fd_context *ctx)
+ assert_dt
{
ctx->in_discard_blit = false;
}
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box)
+ assert_dt
{
/* not until we allow rendertargets to be buffers */
if (dst->target == PIPE_BUFFER || src->target == PIPE_BUFFER)
#include "freedreno_context.h"
-bool fd_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info);
+bool fd_blitter_blit(struct fd_context *ctx, const struct pipe_blit_info *info) assert_dt;
void
fd_blitter_clear(struct pipe_context *pctx, unsigned buffers,
- const union pipe_color_union *color, double depth, unsigned stencil);
+ const union pipe_color_union *color, double depth, unsigned stencil) assert_dt;
void fd_resource_copy_region(struct pipe_context *pctx,
struct pipe_resource *dst,
unsigned dstx, unsigned dsty, unsigned dstz,
struct pipe_resource *src,
unsigned src_level,
- const struct pipe_box *src_box);
+ const struct pipe_box *src_box) assert_dt;
-bool fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info);
+bool fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info) assert_dt;
#endif /* FREEDRENO_BLIT_H_ */
static void
fd_context_flush(struct pipe_context *pctx, struct pipe_fence_handle **fencep,
unsigned flags)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_fence_handle *fence = NULL;
static void
fd_texture_barrier(struct pipe_context *pctx, unsigned flags)
+ in_dt
{
if (flags == PIPE_TEXTURE_BARRIER_FRAMEBUFFER) {
struct fd_context *ctx = fd_context(pctx);
*/
static void
fd_emit_string_marker(struct pipe_context *pctx, const char *string, int len)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
int global_faults = fd_get_reset_count(ctx, false);
enum pipe_reset_status status;
+ /* Not called in driver thread, but threaded_context syncs
+ * before calling this:
+ */
+ fd_context_access_begin(ctx);
+
if (context_faults != ctx->context_reset_count) {
status = PIPE_GUILTY_CONTEXT_RESET;
} else if (global_faults != ctx->global_reset_count) {
ctx->context_reset_count = context_faults;
ctx->global_reset_count = global_faults;
+ fd_context_access_end(ctx);
+
return status;
}
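fd_context_access_begin()/end() are not shown in this excerpt; presumably they only acquire and release the same driver-thread capability for the benefit of the analysis, since the caller (threaded_context) has already synchronized. A rough sketch, reusing the assumed fd_driver_thread capability from the earlier macro sketch:

/* Sketch only; the real helpers live in freedreno_context.h. */
static inline void
fd_context_access_begin(struct fd_context *ctx)
   __attribute__((acquire_capability(fd_driver_thread)))
{
   /* nothing to lock: the caller has already synced with the driver
    * thread, this just informs the thread-safety analysis.
    */
}

static inline void
fd_context_access_end(struct fd_context *ctx)
   __attribute__((release_capability(fd_driver_thread)))
{
}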
struct pipe_context *
fd_context_init(struct fd_context *ctx, struct pipe_screen *pscreen,
const uint8_t *primtypes, void *priv, unsigned flags)
+ disable_thread_safety_analysis
{
struct fd_screen *screen = fd_screen(pscreen);
struct pipe_context *pctx;
struct fd_screen *screen;
struct fd_pipe *pipe;
- struct blitter_context *blitter;
- void *clear_rs_state[2];
- struct primconvert_context *primconvert;
+ struct blitter_context *blitter dt;
+ void *clear_rs_state[2] dt;
+ struct primconvert_context *primconvert dt;
/* slab for pipe_transfer allocations: */
- struct slab_child_pool transfer_pool;
+ struct slab_child_pool transfer_pool dt;
/**
* query related state:
*/
/*@{*/
/* slabs for fd_hw_sample and fd_hw_sample_period allocations: */
- struct slab_mempool sample_pool;
- struct slab_mempool sample_period_pool;
+ struct slab_mempool sample_pool dt;
+ struct slab_mempool sample_period_pool dt;
/* sample-providers for hw queries: */
const struct fd_hw_sample_provider *hw_sample_providers[MAX_HW_SAMPLE_PROVIDERS];
/* list of active queries: */
- struct list_head hw_active_queries;
+ struct list_head hw_active_queries dt;
/* sample-providers for accumulating hw queries: */
const struct fd_acc_sample_provider *acc_sample_providers[MAX_HW_SAMPLE_PROVIDERS];
/* list of active accumulating queries: */
- struct list_head acc_active_queries;
+ struct list_head acc_active_queries dt;
/*@}*/
/* Whether we need to recheck the active_queries list next
* fd_batch_update_queries().
*/
- bool update_active_queries;
+ bool update_active_queries dt;
/* Current state of pctx->set_active_query_state() (i.e. "should drawing
* be counted against non-perfcounter queries")
*/
- bool active_queries;
+ bool active_queries dt;
/* table with PIPE_PRIM_MAX entries mapping PIPE_PRIM_x to
* DI_PT_x value to use for draw initiator. There are some
uint64_t batch_total, batch_sysmem, batch_gmem, batch_nondraw, batch_restore;
uint64_t staging_uploads, shadow_uploads;
uint64_t vs_regs, hs_regs, ds_regs, gs_regs, fs_regs;
- } stats;
+ } stats dt;
/* Current batch.. the rule here is that you can deref ctx->batch
* in codepaths from pipe_context entrypoints. But not in code-
* called from GMEM code), since in those code-paths the batch
* you care about is not necessarily the same as ctx->batch.
*/
- struct fd_batch *batch;
+ struct fd_batch *batch dt;
/* NULL if there has been rendering since last flush. Otherwise
* keeps a reference to the last fence so we can re-use it rather
* than having to flush no-op batch.
*/
- struct pipe_fence_handle *last_fence;
+ struct pipe_fence_handle *last_fence dt;
/* Fence fd we are told to wait on via ->fence_server_sync() (or -1
* if none). The in-fence is transferred over to the batch on the
* maturely, causing us to stall early in the frame where we could
* be building up cmdstream.
*/
- int in_fence_fd;
+ int in_fence_fd dt;
/* track last known reset status globally and per-context to
* determine if more resets occurred since then. If global reset
* per-context reset count increases, it means we crashed the
* gpu.
*/
- uint32_t context_reset_count, global_reset_count;
+ uint32_t context_reset_count dt;
+ uint32_t global_reset_count dt;
/* Context sequence #, used for batch-cache key: */
uint16_t seqno;
/* Are we in process of shadowing a resource? Used to detect recursion
* in transfer_map, and skip unneeded synchronization.
*/
- bool in_shadow : 1;
+ bool in_shadow : 1 dt;
/* Ie. in blit situation where we no longer care about previous framebuffer
* contents. Main point is to eliminate blits from fd_try_shadow_resource().
* For example, in case of texture upload + gen-mipmaps.
*/
- bool in_discard_blit : 1;
+ bool in_discard_blit : 1 dt;
/* points to either scissor or disabled_scissor depending on rast state: */
- struct pipe_scissor_state *current_scissor;
+ struct pipe_scissor_state *current_scissor dt;
- struct pipe_scissor_state scissor;
+ struct pipe_scissor_state scissor dt;
/* we don't have a disable/enable bit for scissor, so instead we keep
* a disabled-scissor state which matches the entire bound framebuffer
* and use that when scissor is not enabled.
*/
- struct pipe_scissor_state disabled_scissor;
+ struct pipe_scissor_state disabled_scissor dt;
/* Per vsc pipe bo's (a2xx-a5xx): */
- struct fd_bo *vsc_pipe_bo[32];
+ struct fd_bo *vsc_pipe_bo[32] dt;
/* which state objects need to be re-emit'd: */
- enum fd_dirty_3d_state dirty;
+ enum fd_dirty_3d_state dirty dt;
/* per shader-stage dirty status: */
- enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES];
+ enum fd_dirty_shader_state dirty_shader[PIPE_SHADER_TYPES] dt;
- void *compute;
- struct pipe_blend_state *blend;
- struct pipe_rasterizer_state *rasterizer;
- struct pipe_depth_stencil_alpha_state *zsa;
+ void *compute dt;
+ struct pipe_blend_state *blend dt;
+ struct pipe_rasterizer_state *rasterizer dt;
+ struct pipe_depth_stencil_alpha_state *zsa dt;
- struct fd_texture_stateobj tex[PIPE_SHADER_TYPES];
+ struct fd_texture_stateobj tex[PIPE_SHADER_TYPES] dt;
- struct fd_program_stateobj prog;
+ struct fd_program_stateobj prog dt;
- struct fd_vertex_state vtx;
+ struct fd_vertex_state vtx dt;
- struct pipe_blend_color blend_color;
- struct pipe_stencil_ref stencil_ref;
- unsigned sample_mask;
- unsigned min_samples;
+ struct pipe_blend_color blend_color dt;
+ struct pipe_stencil_ref stencil_ref dt;
+ unsigned sample_mask dt;
+ unsigned min_samples dt;
/* local context fb state, for when ctx->batch is null: */
- struct pipe_framebuffer_state framebuffer;
- struct pipe_poly_stipple stipple;
- struct pipe_viewport_state viewport;
- struct pipe_scissor_state viewport_scissor;
- struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES];
- struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES];
- struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES];
- struct fd_streamout_stateobj streamout;
- struct fd_global_bindings_stateobj global_bindings;
- struct pipe_clip_state ucp;
-
- struct pipe_query *cond_query;
- bool cond_cond; /* inverted rendering condition */
- uint cond_mode;
+ struct pipe_framebuffer_state framebuffer dt;
+ struct pipe_poly_stipple stipple dt;
+ struct pipe_viewport_state viewport dt;
+ struct pipe_scissor_state viewport_scissor dt;
+ struct fd_constbuf_stateobj constbuf[PIPE_SHADER_TYPES] dt;
+ struct fd_shaderbuf_stateobj shaderbuf[PIPE_SHADER_TYPES] dt;
+ struct fd_shaderimg_stateobj shaderimg[PIPE_SHADER_TYPES] dt;
+ struct fd_streamout_stateobj streamout dt;
+ struct fd_global_bindings_stateobj global_bindings dt;
+ struct pipe_clip_state ucp dt;
+
+ struct pipe_query *cond_query dt;
+ bool cond_cond dt; /* inverted rendering condition */
+ uint cond_mode dt;
/* Private memory is a memory space where each fiber gets its own piece of
* memory, in addition to registers. It is backed by a buffer which needs
struct {
struct fd_bo *bo;
uint32_t per_fiber_size;
- } pvtmem[2];
+ } pvtmem[2] dt;
struct pipe_debug_callback debug;
- struct u_trace_context trace_context;
+ struct u_trace_context trace_context dt;
/* Called on rebind_resource() for any per-gen cleanup required: */
- void (*rebind_resource)(struct fd_context *ctx, struct fd_resource *rsc);
+ void (*rebind_resource)(struct fd_context *ctx, struct fd_resource *rsc) dt;
/* GMEM/tile handling fxns: */
- void (*emit_tile_init)(struct fd_batch *batch);
- void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile);
- void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile);
- void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile);
- void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile);
- void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile);
- void (*emit_tile_fini)(struct fd_batch *batch); /* optional */
+ void (*emit_tile_init)(struct fd_batch *batch) dt;
+ void (*emit_tile_prep)(struct fd_batch *batch, const struct fd_tile *tile) dt;
+ void (*emit_tile_mem2gmem)(struct fd_batch *batch, const struct fd_tile *tile) dt;
+ void (*emit_tile_renderprep)(struct fd_batch *batch, const struct fd_tile *tile) dt;
+ void (*emit_tile)(struct fd_batch *batch, const struct fd_tile *tile) dt;
+ void (*emit_tile_gmem2mem)(struct fd_batch *batch, const struct fd_tile *tile) dt;
+ void (*emit_tile_fini)(struct fd_batch *batch) dt; /* optional */
/* optional, for GMEM bypass: */
- void (*emit_sysmem_prep)(struct fd_batch *batch);
- void (*emit_sysmem_fini)(struct fd_batch *batch);
+ void (*emit_sysmem_prep)(struct fd_batch *batch) dt;
+ void (*emit_sysmem_fini)(struct fd_batch *batch) dt;
/* draw: */
bool (*draw_vbo)(struct fd_context *ctx, const struct pipe_draw_info *info,
- const struct pipe_draw_indirect_info *indirect,
- const struct pipe_draw_start_count *draw,
- unsigned index_offset);
+ const struct pipe_draw_indirect_info *indirect,
+ const struct pipe_draw_start_count *draw,
+ unsigned index_offset) dt;
bool (*clear)(struct fd_context *ctx, unsigned buffers,
- const union pipe_color_union *color, double depth, unsigned stencil);
+ const union pipe_color_union *color, double depth, unsigned stencil) dt;
/* compute: */
- void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info);
+ void (*launch_grid)(struct fd_context *ctx, const struct pipe_grid_info *info) dt;
/* query: */
struct fd_query * (*create_query)(struct fd_context *ctx, unsigned query_type, unsigned index);
- void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles);
+ void (*query_prepare)(struct fd_batch *batch, uint32_t num_tiles) dt;
void (*query_prepare_tile)(struct fd_batch *batch, uint32_t n,
- struct fd_ringbuffer *ring);
- void (*query_update_batch)(struct fd_batch *batch, bool disable_all);
+ struct fd_ringbuffer *ring) dt;
+ void (*query_update_batch)(struct fd_batch *batch, bool disable_all) dt;
/* blitter: */
- bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info);
- void (*clear_ubwc)(struct fd_batch *batch, struct fd_resource *rsc);
+ bool (*blit)(struct fd_context *ctx, const struct pipe_blit_info *info) dt;
+ void (*clear_ubwc)(struct fd_batch *batch, struct fd_resource *rsc) dt;
/* handling for barriers: */
- void (*framebuffer_barrier)(struct fd_context *ctx);
+ void (*framebuffer_barrier)(struct fd_context *ctx) dt;
/* logger: */
void (*record_timestamp)(struct fd_ringbuffer *ring, struct fd_bo *bo, unsigned offset);
uint32_t instance_start;
uint32_t restart_index;
uint32_t streamout_mask;
- } last;
+ } last dt;
};
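Taken together, the dt field annotations above and the in_dt/assert_dt function annotations let clang's -Wthread-safety flag accesses to driver-thread state from unannotated paths. A hypothetical example (not part of the driver, assuming the illustrative macro definitions sketched earlier) of what the analysis would catch:

static bool
peek_batch_bad(struct fd_context *ctx)   /* note: no in_dt/assert_dt */
{
   /* -Wthread-safety: reading 'batch' requires holding fd_driver_thread */
   return ctx->batch != NULL;
}

static bool
peek_batch_ok(struct fd_context *ctx)
   assert_dt   /* caller must be on the driver thread */
{
   return ctx->batch != NULL;   /* no warning */
}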
static inline struct fd_context *
/* mark all state dirty: */
static inline void
fd_context_all_dirty(struct fd_context *ctx)
+ assert_dt
{
ctx->last.dirty = true;
ctx->dirty = ~0;
static inline void
fd_context_all_clean(struct fd_context *ctx)
+ assert_dt
{
ctx->last.dirty = false;
ctx->dirty = 0;
static inline struct pipe_scissor_state *
fd_context_get_scissor(struct fd_context *ctx)
+ assert_dt
{
return ctx->current_scissor;
}
return (1 << prim) & ctx->primtype_mask;
}
-void fd_context_switch_from(struct fd_context *ctx);
-void fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch);
-struct fd_batch * fd_context_batch(struct fd_context *ctx);
-struct fd_batch * fd_context_batch_locked(struct fd_context *ctx);
+void fd_context_switch_from(struct fd_context *ctx) assert_dt;
+void fd_context_switch_to(struct fd_context *ctx, struct fd_batch *batch) assert_dt;
+struct fd_batch * fd_context_batch(struct fd_context *ctx) assert_dt;
+struct fd_batch * fd_context_batch_locked(struct fd_context *ctx) assert_dt;
void fd_context_setup_common_vbos(struct fd_context *ctx);
void fd_context_cleanup_common_vbos(struct fd_context *ctx);
struct pipe_screen *pscreen, const uint8_t *primtypes,
void *priv, unsigned flags);
-void fd_context_destroy(struct pipe_context *pctx);
+void fd_context_destroy(struct pipe_context *pctx) assert_dt;
#endif /* FREEDRENO_CONTEXT_H_ */
static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc)
+ assert_dt
{
if (!prsc)
return;
static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc)
+ assert_dt
{
if (!prsc)
return;
static void
batch_draw_tracking_for_dirty_bits(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
static void
batch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draws,
unsigned num_draws)
+ in_dt
{
if (num_draws > 1) {
struct pipe_draw_info tmp_info = *info;
static void
batch_clear_tracking(struct fd_batch *batch, unsigned buffers)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct pipe_framebuffer_state *pfb = &batch->framebuffer;
const struct pipe_scissor_state *scissor_state,
const union pipe_color_union *color, double depth,
unsigned stencil)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
static void
fd_launch_grid(struct pipe_context *pctx, const struct pipe_grid_info *info)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[PIPE_SHADER_COMPUTE];
};
static void fence_flush(struct pipe_fence_handle *fence)
+ /* TODO this will change w/ threaded-ctx where we need to use threaded_context_flush().. */
+ in_dt
{
if (fence->batch)
fd_batch_flush(fence->batch);
static void
render_tiles(struct fd_batch *batch, struct fd_gmem_stateobj *gmem)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
int i;
static void
render_sysmem(struct fd_batch *batch)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_batch;
-void fd_gmem_render_tiles(struct fd_batch *batch);
+void fd_gmem_render_tiles(struct fd_batch *batch) assert_dt;
unsigned fd_gmem_estimate_bins_per_pipe(struct fd_batch *batch);
bool fd_gmem_needs_restore(struct fd_batch *batch, const struct fd_tile *tile,
uint32_t buffers);
static void
fd_vs_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.vs = hwcso;
static void
fd_tcs_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.hs = hwcso;
static void
fd_tes_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.ds = hwcso;
static void
fd_gs_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.gs = hwcso;
static void
fd_fs_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->prog.fs = hwcso;
static void
fd_destroy_query(struct pipe_context *pctx, struct pipe_query *pq)
+ in_dt
{
struct fd_query *q = fd_query(pq);
q->funcs->destroy_query(fd_context(pctx), q);
static bool
fd_begin_query(struct pipe_context *pctx, struct pipe_query *pq)
+ in_dt
{
struct fd_query *q = fd_query(pq);
static bool
fd_end_query(struct pipe_context *pctx, struct pipe_query *pq)
+ in_dt
{
struct fd_query *q = fd_query(pq);
static void
fd_render_condition(struct pipe_context *pctx, struct pipe_query *pq,
bool condition, enum pipe_render_cond_flag mode)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->cond_query = pq;
static void
fd_set_active_query_state(struct pipe_context *pctx, bool enable)
+ assert_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->active_queries = enable;
#include "pipe/p_context.h"
+#include "freedreno_util.h"
+
struct fd_context;
struct fd_query;
struct fd_query_funcs {
void (*destroy_query)(struct fd_context *ctx,
- struct fd_query *q);
- void (*begin_query)(struct fd_context *ctx, struct fd_query *q);
- void (*end_query)(struct fd_context *ctx, struct fd_query *q);
+ struct fd_query *q) dt;
+ void (*begin_query)(struct fd_context *ctx, struct fd_query *q) dt;
+ void (*end_query)(struct fd_context *ctx, struct fd_query *q) dt;
bool (*get_query_result)(struct fd_context *ctx,
struct fd_query *q, bool wait,
union pipe_query_result *result);
static void
fd_acc_destroy_query(struct fd_context *ctx, struct fd_query *q)
+ assert_dt
{
struct fd_acc_query *aq = fd_acc_query(q);
static void
fd_acc_query_pause(struct fd_acc_query *aq)
+ assert_dt
{
const struct fd_acc_sample_provider *p = aq->provider;
static void
fd_acc_query_resume(struct fd_acc_query *aq, struct fd_batch *batch)
+ assert_dt
{
const struct fd_acc_sample_provider *p = aq->provider;
static void
fd_acc_begin_query(struct fd_context *ctx, struct fd_query *q)
+ assert_dt
{
struct fd_acc_query *aq = fd_acc_query(q);
static void
fd_acc_end_query(struct fd_context *ctx, struct fd_query *q)
+ assert_dt
{
struct fd_acc_query *aq = fd_acc_query(q);
* wait to flush unnecessarily but we also don't want to
* spin forever:
*/
- if (aq->no_wait_cnt++ > 5)
+ if (aq->no_wait_cnt++ > 5) {
+ fd_context_access_begin(ctx);
fd_batch_flush(rsc->write_batch);
+ fd_context_access_end(ctx);
+ }
return false;
}
fd_bo_cpu_fini(rsc->bo);
}
- if (rsc->write_batch)
+ if (rsc->write_batch) {
+ fd_context_access_begin(ctx);
fd_batch_flush(rsc->write_batch);
+ fd_context_access_end(ctx);
+ }
/* get the result: */
fd_bo_cpu_prep(rsc->bo, ctx->pipe, DRM_FREEDRENO_PREP_READ);
unsigned size;
- void (*resume)(struct fd_acc_query *aq, struct fd_batch *batch);
- void (*pause)(struct fd_acc_query *aq, struct fd_batch *batch);
+ void (*resume)(struct fd_acc_query *aq, struct fd_batch *batch) dt;
+ void (*pause)(struct fd_acc_query *aq, struct fd_batch *batch) dt;
void (*result)(struct fd_acc_query *aq, void *buf,
union pipe_query_result *result);
unsigned index);
struct fd_query * fd_acc_create_query2(struct fd_context *ctx, unsigned query_type,
unsigned index, const struct fd_acc_sample_provider *provider);
-void fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all);
+void fd_acc_query_update_batch(struct fd_batch *batch, bool disable_all) assert_dt;
void fd_acc_query_register_provider(struct pipe_context *pctx,
const struct fd_acc_sample_provider *provider);
static struct fd_hw_sample *
get_sample(struct fd_batch *batch, struct fd_ringbuffer *ring,
unsigned query_type)
+ assert_dt
{
struct fd_context *ctx = batch->ctx;
struct fd_hw_sample *samp = NULL;
static void
resume_query(struct fd_batch *batch, struct fd_hw_query *hq,
struct fd_ringbuffer *ring)
+ assert_dt
{
int idx = pidx(hq->provider->query_type);
DBG("%p", hq);
static void
pause_query(struct fd_batch *batch, struct fd_hw_query *hq,
struct fd_ringbuffer *ring)
+ assert_dt
{
ASSERTED int idx = pidx(hq->provider->query_type);
DBG("%p", hq);
static void
fd_hw_begin_query(struct fd_context *ctx, struct fd_query *q)
+ assert_dt
{
struct fd_batch *batch = fd_context_batch_locked(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
static void
fd_hw_end_query(struct fd_context *ctx, struct fd_query *q)
+ assert_dt
{
struct fd_batch *batch = fd_context_batch_locked(ctx);
struct fd_hw_query *hq = fd_hw_query(q);
* wait to flush unnecessarily but we also don't want to
* spin forever:
*/
- if (hq->no_wait_cnt++ > 5)
+ if (hq->no_wait_cnt++ > 5) {
+ fd_context_access_begin(ctx);
fd_batch_flush(rsc->write_batch);
+ fd_context_access_end(ctx);
+ }
return false;
}
struct fd_resource *rsc = fd_resource(start->prsc);
- if (rsc->write_batch)
+ if (rsc->write_batch) {
+ fd_context_access_begin(ctx);
fd_batch_flush(rsc->write_batch);
+ fd_context_access_end(ctx);
+ }
/* some piglit tests at least do query with no draws, I guess: */
if (!rsc->bo)
/* Optional hook for enabling a counter. Guaranteed to happen
* at least once before the first ->get_sample() in a batch.
*/
- void (*enable)(struct fd_context *ctx, struct fd_ringbuffer *ring);
+ void (*enable)(struct fd_context *ctx, struct fd_ringbuffer *ring) dt;
/* when a new sample is required, emit appropriate cmdstream
* and return a sample object:
*/
struct fd_hw_sample *(*get_sample)(struct fd_batch *batch,
- struct fd_ringbuffer *ring);
+ struct fd_ringbuffer *ring) dt;
/* accumulate the results from specified sample period: */
void (*accumulate_result)(struct fd_context *ctx,
struct fd_hw_sample * fd_hw_sample_init(struct fd_batch *batch, uint32_t size);
/* don't call directly, use fd_hw_sample_reference() */
void __fd_hw_sample_destroy(struct fd_context *ctx, struct fd_hw_sample *samp);
-void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles);
+void fd_hw_query_prepare(struct fd_batch *batch, uint32_t num_tiles) assert_dt;
void fd_hw_query_prepare_tile(struct fd_batch *batch, uint32_t n,
- struct fd_ringbuffer *ring);
-void fd_hw_query_update_batch(struct fd_batch *batch, bool end_batch);
-void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring);
+ struct fd_ringbuffer *ring) assert_dt;
+void fd_hw_query_update_batch(struct fd_batch *batch, bool end_batch) assert_dt;
+void fd_hw_query_enable(struct fd_batch *batch, struct fd_ringbuffer *ring) assert_dt;
void fd_hw_query_register_provider(struct pipe_context *pctx,
const struct fd_hw_sample_provider *provider);
void fd_hw_query_init(struct pipe_context *pctx);
static uint64_t
read_counter(struct fd_context *ctx, int type)
+ assert_dt
{
switch (type) {
case PIPE_QUERY_PRIMITIVES_GENERATED:
static void
fd_sw_begin_query(struct fd_context *ctx, struct fd_query *q)
+ assert_dt
{
struct fd_sw_query *sq = fd_sw_query(q);
sq->begin_value = read_counter(ctx, q->type);
static void
fd_sw_end_query(struct fd_context *ctx, struct fd_query *q)
+ assert_dt
{
struct fd_sw_query *sq = fd_sw_query(q);
sq->end_value = read_counter(ctx, q->type);
*/
static void
rebind_resource_in_ctx(struct fd_context *ctx, struct fd_resource *rsc)
+ assert_dt
{
struct pipe_resource *prsc = &rsc->base;
static void
rebind_resource(struct fd_resource *rsc)
+ assert_dt
{
struct fd_screen *screen = fd_screen(rsc->base.screen);
static void
do_blit(struct fd_context *ctx, const struct pipe_blit_info *blit, bool fallback)
+ assert_dt
{
struct pipe_context *pctx = &ctx->base;
static bool
fd_try_shadow_resource(struct fd_context *ctx, struct fd_resource *rsc,
unsigned level, const struct pipe_box *box, uint64_t modifier)
+ assert_dt
{
struct pipe_context *pctx = &ctx->base;
struct pipe_resource *prsc = &rsc->base;
static void
fd_blit_from_staging(struct fd_context *ctx, struct fd_transfer *trans)
+ assert_dt
{
struct pipe_resource *dst = trans->base.resource;
struct pipe_blit_info blit = {};
static void
fd_blit_to_staging(struct fd_context *ctx, struct fd_transfer *trans)
+ assert_dt
{
struct pipe_resource *src = trans->base.resource;
struct pipe_blit_info blit = {};
static void
flush_resource(struct fd_context *ctx, struct fd_resource *rsc, unsigned usage)
+ assert_dt
{
struct fd_batch *write_batch = NULL;
static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
+ in_dt
{
flush_resource(fd_context(pctx), fd_resource(prsc), PIPE_MAP_READ);
}
static void
fd_resource_transfer_unmap(struct pipe_context *pctx,
struct pipe_transfer *ptrans)
+ in_dt /* TODO for threaded-ctx we'll need to split out unsynchronized path */
{
struct fd_context *ctx = fd_context(pctx);
struct fd_resource *rsc = fd_resource(ptrans->resource);
unsigned level, unsigned usage,
const struct pipe_box *box,
struct pipe_transfer **pptrans)
+ in_dt /* TODO for threaded-ctx we'll need to split out unsynchronized path */
{
struct fd_context *ctx = fd_context(pctx);
struct fd_resource *rsc = fd_resource(prsc);
static void
fd_invalidate_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_resource *rsc = fd_resource(prsc);
static void
fd_blit_pipe(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
+ in_dt
{
/* wrap fd_blit to return void */
fd_blit(pctx, blit_info);
uint32_t fd_setup_slices(struct fd_resource *rsc);
void fd_resource_resize(struct pipe_resource *prsc, uint32_t sz);
-void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc);
+void fd_resource_uncompress(struct fd_context *ctx, struct fd_resource *rsc) assert_dt;
void fd_resource_dump(struct fd_resource *rsc, const char *name);
-bool fd_render_condition_check(struct pipe_context *pctx);
+bool fd_render_condition_check(struct pipe_context *pctx) assert_dt;
static inline bool
fd_batch_references_resource(struct fd_batch *batch, struct fd_resource *rsc)
static inline void
fd_batch_write_prep(struct fd_batch *batch, struct fd_resource *rsc)
+ assert_dt
{
if (unlikely(rsc->needs_ubwc_clear)) {
batch->ctx->clear_ubwc(batch, rsc);
static inline void
fd_batch_resource_read(struct fd_batch *batch,
struct fd_resource *rsc)
+ assert_dt
{
/* Fast path: if we hit this then we know we don't have anyone else
* writing to it (since both _write and _read flush other writers), and
static void
fd_set_blend_color(struct pipe_context *pctx,
const struct pipe_blend_color *blend_color)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->blend_color = *blend_color;
static void
fd_set_stencil_ref(struct pipe_context *pctx,
const struct pipe_stencil_ref stencil_ref)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->stencil_ref = stencil_ref;
static void
fd_set_clip_state(struct pipe_context *pctx,
const struct pipe_clip_state *clip)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->ucp = *clip;
static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->sample_mask = (uint16_t)sample_mask;
static void
fd_set_min_samples(struct pipe_context *pctx, unsigned min_samples)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->min_samples = min_samples;
enum pipe_shader_type shader, uint index,
bool take_ownership,
const struct pipe_constant_buffer *cb)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];
unsigned start, unsigned count,
const struct pipe_shader_buffer *buffers,
unsigned writable_bitmask)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[shader];
unsigned start, unsigned count,
unsigned unbind_num_trailing_slots,
const struct pipe_image_view *images)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_shaderimg_stateobj *so = &ctx->shaderimg[shader];
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
const struct pipe_framebuffer_state *framebuffer)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_framebuffer_state *cso;
static void
fd_set_polygon_stipple(struct pipe_context *pctx,
const struct pipe_poly_stipple *stipple)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->stipple = *stipple;
unsigned start_slot,
unsigned num_scissors,
const struct pipe_scissor_state *scissor)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
unsigned start_slot,
unsigned num_viewports,
const struct pipe_viewport_state *viewport)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_scissor_state *scissor = &ctx->viewport_scissor;
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_blend_state *cso = hwcso;
static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
FREE(hwcso);
}
static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);
static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
FREE(hwcso);
}
static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->zsa = hwcso;
static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
FREE(hwcso);
}
static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
FREE(hwcso);
}
static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->vtx.vtx = hwcso;
fd_set_stream_output_targets(struct pipe_context *pctx,
unsigned num_targets, struct pipe_stream_output_target **targets,
const unsigned *offsets)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_streamout_stateobj *so = &ctx->streamout;
static void
fd_bind_compute_state(struct pipe_context *pctx, void *state)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
ctx->compute = state;
static void
fd_set_compute_resources(struct pipe_context *pctx,
unsigned start, unsigned count, struct pipe_surface **prscs)
+ in_dt
{
// TODO
}
fd_set_global_binding(struct pipe_context *pctx,
unsigned first, unsigned count, struct pipe_resource **prscs,
uint32_t **handles)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
struct fd_global_bindings_stateobj *so = &ctx->global_bindings;
#include "freedreno_context.h"
static inline bool fd_depth_enabled(struct fd_context *ctx)
+ assert_dt
{
return ctx->zsa && ctx->zsa->depth_enabled;
}
static inline bool fd_depth_write_enabled(struct fd_context *ctx)
+ assert_dt
{
return ctx->zsa && ctx->zsa->depth_writemask;
}
static inline bool fd_stencil_enabled(struct fd_context *ctx)
+ assert_dt
{
return ctx->zsa && ctx->zsa->stencil[0].enabled;
}
static inline bool fd_depth_clamp_enabled(struct fd_context *ctx)
+ assert_dt
{
return !(ctx->rasterizer->depth_clip_near && ctx->rasterizer->depth_clip_far);
}
fd_sampler_states_bind(struct pipe_context *pctx,
enum pipe_shader_type shader, unsigned start,
unsigned nr, void **hwcso)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
fd_set_sampler_views(struct pipe_context *pctx, enum pipe_shader_type shader,
unsigned start, unsigned nr, unsigned unbind_num_trailing_slots,
struct pipe_sampler_view **views)
+ in_dt
{
struct fd_context *ctx = fd_context(pctx);
mesa_logw(__VA_ARGS__); \
} while(0)
+struct fd_context;
+
+/**
+ * A pseudo-variable for defining where various parts of the fd_context
+ * can be safely accessed.
+ *
+ * With threaded_context, certain pctx funcs are called from the gallium
+ * front-end/state-tracker (e.g. CSO creation), while others are called
+ * from the driver thread. Things called from the driver thread can safely
+ * access anything in the ctx, while things called from the fe/st thread
+ * must limit themselves to "safe" things (i.e. ctx->screen is safe as it
+ * is immutable, but the blitter_context is not).
+ */
+extern lock_cap_t fd_context_access_cap;
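+/*
+ * (Illustrative note: lock_cap_t and the annotation macros below are
+ * assumed to be thin wrappers around clang's -Wthread-safety attributes,
+ * roughly:
+ *
+ *    #define guarded_by(l)   __attribute__((guarded_by(l)))
+ *    #define assert_cap(l)   __attribute__((assert_capability(l)))
+ *    #define requires_cap(l) __attribute__((requires_capability(l)))
+ *    #define acquire_cap(l)  __attribute__((acquire_capability(l)))
+ *    #define release_cap(l)  __attribute__((release_capability(l)))
+ *
+ * expanding to nothing on compilers without the analysis; the real
+ * definitions live elsewhere in the tree and may differ.)
+ */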
+
+/**
+ * Make the annotation a bit less verbose: mark fields which should only
+ * be accessed by the driver thread with 'dt'.
+ */
+#define dt guarded_by(fd_context_access_cap)
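+/*
+ * For instance, a context field that only the driver thread may touch
+ * would be declared (hypothetical field, shown only to illustrate the
+ * macro):
+ *
+ *    struct fd_batch *batch dt;
+ *
+ * after which any access from a function that neither holds nor asserts
+ * fd_context_access_cap is flagged by the analysis.
+ */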
+
+/**
+ * Annotation for entry-point functions, only called on the driver thread.
+ *
+ * For static functions, apply the annotation to the function definition.
+ * Otherwise apply it to the prototype.
+ */
+#define in_dt assert_cap(fd_context_access_cap)
+
+/**
+ * Annotation for internal functions which are only called from entry-
+ * point functions (with 'in_dt' annotation) or other internal functions
+ * with the 'assert_dt' annotation.
+ *
+ * For static functions, apply the annotation to the function definition.
+ * Otherwise apply it to the prototype.
+ */
+#define assert_dt requires_cap(fd_context_access_cap)
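+/*
+ * A minimal sketch of how the annotations compose (hypothetical function
+ * names, for illustration only):
+ *
+ *    static void
+ *    emit_something(struct fd_context *ctx)
+ *       assert_dt      // internal helper, caller must hold/assert the cap
+ *    {
+ *       ...
+ *    }
+ *
+ *    static void
+ *    fd_something(struct pipe_context *pctx)
+ *       in_dt          // pctx entry point, runs on the driver thread
+ *    {
+ *       emit_something(fd_context(pctx));
+ *    }
+ */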
+
+/**
+ * Special helpers for context access outside of the driver thread. For
+ * example, pctx->get_query_result() is not called on the driver thread,
+ * but either the query is guaranteed to already be flushed, or the
+ * driver-thread queue is guaranteed to be flushed.
+ *
+ * Use with caution!
+ */
+static inline void
+fd_context_access_begin(struct fd_context *ctx)
+ acquire_cap(fd_context_access_cap)
+{
+}
+
+static inline void
+fd_context_access_end(struct fd_context *ctx)
+ release_cap(fd_context_access_cap)
+{
+}
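+/*
+ * Typical use, mirroring the get_query_result paths earlier in this
+ * change: code running outside the driver thread that must flush a
+ * batch brackets the call, e.g.
+ *
+ *    fd_context_access_begin(ctx);
+ *    fd_batch_flush(rsc->write_batch);
+ *    fd_context_access_end(ctx);
+ *
+ * Note that this only satisfies the analysis (the helpers have empty
+ * bodies); the caller must still know the access is actually safe.
+ */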
+
+
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
static void
ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
+ assert_dt
{
/* when we emit const state via ring (IB2) we need a WFI, but when
* it is emit'd via stateobj, we don't
static inline void
emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx, enum pipe_shader_type t)
+ assert_dt
{
enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];
struct fd_context *ctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
const struct pipe_draw_start_count *draw)
+ assert_dt
{
debug_assert(v->type == MESA_SHADER_VERTEX);
static inline void
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx)
+ assert_dt
{
debug_assert(v->type == MESA_SHADER_FRAGMENT);
static inline void
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
struct fd_context *ctx, const struct pipe_grid_info *info)
+ assert_dt
{
debug_assert(gl_shader_stage_is_compute(v->type));