type. index is used to indicate which buffer to set (some APIs may allow
multiple ones to be set, and binding a specific one later, though drivers
are mostly restricted to the first one right now).
+ If take_ownership is true, the buffer reference is passed to the driver, so
+ that the driver doesn't have to increment the reference count.
* ``set_inlinable_constants`` sets inlinable constants for constant buffer 0.
ctx->pipe->set_shader_images(ctx->pipe, sh, 0, maximg, NULL);
}
for (int i = 0; i < maxcb; i++) {
- ctx->pipe->set_constant_buffer(ctx->pipe, sh, i, NULL);
+ ctx->pipe->set_constant_buffer(ctx->pipe, sh, i, false, NULL);
}
}
}
ctx->pipe->bind_depth_stencil_alpha_state( ctx->pipe, NULL );
ctx->pipe->bind_fs_state( ctx->pipe, NULL );
- ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
+ ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
ctx->pipe->bind_vs_state( ctx->pipe, NULL );
- ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, NULL);
+ ctx->pipe->set_constant_buffer(ctx->pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
if (ctx->has_geometry_shader) {
ctx->pipe->bind_gs_state(ctx->pipe, NULL);
}
static void
dd_context_set_constant_buffer(struct pipe_context *_pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *constant_buffer)
{
struct dd_context *dctx = dd_context(_pipe);
safe_memcpy(&dctx->draw_state.constant_buffers[shader][index],
constant_buffer, sizeof(*constant_buffer));
- pipe->set_constant_buffer(pipe, shader, index, constant_buffer);
+ pipe->set_constant_buffer(pipe, shader, index, take_ownership, constant_buffer);
}
static void
static void noop_set_constant_buffer(struct pipe_context *ctx,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
}
static void
rbug_set_constant_buffer(struct pipe_context *_pipe,
enum pipe_shader_type shader,
- uint index,
+ uint index, bool take_ownership,
const struct pipe_constant_buffer *_cb)
{
struct rbug_context *rb_pipe = rbug_context(_pipe);
mtx_lock(&rb_pipe->call_mutex);
pipe->set_constant_buffer(pipe,
shader,
- index,
+ index, take_ownership,
_cb ? &cb : NULL);
mtx_unlock(&rb_pipe->call_mutex);
}
static void
trace_context_set_constant_buffer(struct pipe_context *_pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *constant_buffer)
{
struct trace_context *tr_ctx = trace_context(_pipe);
trace_dump_arg(ptr, pipe);
trace_dump_arg(uint, shader);
trace_dump_arg(uint, index);
+ trace_dump_arg(bool, take_ownership);
trace_dump_arg(constant_buffer, constant_buffer);
- pipe->set_constant_buffer(pipe, shader, index, constant_buffer);
+ pipe->set_constant_buffer(pipe, shader, index, take_ownership, constant_buffer);
trace_dump_call_end();
}
hud->constants.translate[1] = (float) (yoffset * hud_scale);
hud->constants.scale[0] = hud_scale;
hud->constants.scale[1] = yscale * hud_scale;
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &hud->constbuf);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &hud->constbuf);
u_upload_data(hud->pipe->stream_uploader, 0,
num_vertices * 2 * sizeof(float), 16, buffer,
pipe->set_sampler_views(pipe, PIPE_SHADER_FRAGMENT, 0, 1,
&hud->font_sampler_view);
cso_set_samplers(cso, PIPE_SHADER_FRAGMENT, 1, sampler_states);
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &hud->constbuf);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &hud->constbuf);
/* draw accumulated vertices for background quads */
cso_set_blend(cso, &hud->alpha_blend);
hud->constants.scale[0] = hud_scale;
hud->constants.scale[1] = hud_scale;
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &hud->constbuf);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &hud->constbuf);
cso_set_vertex_buffers(cso, 0, 1, &hud->bg.vbuf);
cso_draw_arrays(cso, PIPE_PRIM_QUADS, 0, hud->bg.num_vertices);
hud->constants.translate[1] = 0;
hud->constants.scale[0] = hud_scale;
hud->constants.scale[1] = hud_scale;
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &hud->constbuf);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &hud->constbuf);
if (hud->whitelines.num_vertices) {
cso_set_vertex_buffers(cso, 0, 1, &hud->whitelines.vbuf);
cso_restore_state(cso);
/* Unbind resources that we have bound. */
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, NULL);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
pipe->set_vertex_buffers(pipe, 0, 1, NULL);
pipe->set_sampler_views(pipe, PIPE_SHADER_FRAGMENT, 0, 1, NULL);
cb.user_buffer = constants;
struct pipe_context *pipe = ppq->p->pipe;
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &cb);
- pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &cb);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &cb);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, false, &cb);
mstencil.stencil[0].enabled = 1;
mstencil.stencil[0].valuemask = mstencil.stencil[0].writemask = ~0;
/* Unbind resources that we have bound. */
struct pipe_context *pipe = ppq->p->pipe;
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, NULL);
- pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, NULL);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, false, NULL);
pipe->set_vertex_buffers(pipe, 0, 1, NULL);
pipe->set_sampler_views(pipe, PIPE_SHADER_FRAGMENT, 0, 3, NULL);
struct pipe_context *pipe = blitter->pipe;
pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, blitter->cb_slot,
- &blitter->saved_fs_constant_buffer);
- pipe_resource_reference(&blitter->saved_fs_constant_buffer.buffer, NULL);
+ true, &blitter->saved_fs_constant_buffer);
+ blitter->saved_fs_constant_buffer.buffer = NULL;
}
static void blitter_set_rectangle(struct blitter_context_priv *ctx,
.buffer_size = sizeof(mask),
};
pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, blitter->cb_slot,
- &cb);
+ false, &cb);
pipe->bind_depth_stencil_alpha_state(pipe,
get_stencil_blit_fallback_dsa(ctx, i));
struct pipe_constant_buffer cb = {0};
cb.buffer_size = sizeof(data);
cb.user_buffer = data;
- ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);
+ ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, &cb);
struct pipe_image_view image = {0};
image.resource = dst;
ctx->memory_barrier(ctx, PIPE_BARRIER_ALL);
ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, NULL);
- ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, NULL);
+ ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, NULL);
ctx->set_sampler_views(ctx, PIPE_SHADER_COMPUTE, 0, 1, NULL);
pipe_sampler_view_reference(&src_view, NULL);
ctx->delete_sampler_state(ctx, sampler_state_p);
cb.buffer_offset = 0;
cb.buffer_size = buf->width0;
cb.user_buffer = NULL;
- pipe->set_constant_buffer(pipe, shader, index, &cb);
+ pipe->set_constant_buffer(pipe, shader, index, false, &cb);
} else {
- pipe->set_constant_buffer(pipe, shader, index, NULL);
+ pipe->set_constant_buffer(pipe, shader, index, false, NULL);
}
}
static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
- const struct pipe_constant_buffer *src)
+ const struct pipe_constant_buffer *src,
+ bool take_ownership)
{
if (src) {
- pipe_resource_reference(&dst->buffer, src->buffer);
+ if (take_ownership) {
+ pipe_resource_reference(&dst->buffer, NULL);
+ dst->buffer = src->buffer;
+ } else {
+ pipe_resource_reference(&dst->buffer, src->buffer);
+ }
dst->buffer_offset = src->buffer_offset;
dst->buffer_size = src->buffer_size;
dst->user_buffer = src->user_buffer;
{
struct tc_constant_buffer *p = (struct tc_constant_buffer *)payload;
- pipe->set_constant_buffer(pipe,
- p->shader,
- p->index,
- &p->cb);
- pipe_resource_reference(&p->cb.buffer, NULL);
+ pipe->set_constant_buffer(pipe, p->shader, p->index, true, &p->cb);
}
static void
tc_set_constant_buffer(struct pipe_context *_pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct threaded_context *tc = threaded_context(_pipe);
p->index = index;
if (cb) {
- tc_set_resource_reference(&p->cb.buffer, cb->buffer);
+ if (take_ownership)
+ p->cb.buffer = cb->buffer;
+ else
+ tc_set_resource_reference(&p->cb.buffer, cb->buffer);
+
p->cb.user_buffer = NULL;
p->cb.buffer_offset = cb->buffer_offset;
p->cb.buffer_size = cb->buffer_size;
filter->pipe->clear_render_target(filter->pipe, dst, &clear_color,
0, 0, dst->width, dst->height, false);
filter->pipe->set_constant_buffer(filter->pipe, PIPE_SHADER_FRAGMENT,
- 0, &cb);
+ 0, false, &cb);
filter->pipe->bind_rasterizer_state(filter->pipe, filter->rs_state);
filter->pipe->bind_blend_state(filter->pipe, filter->blend);
filter->pipe->bind_sampler_states(filter->pipe, PIPE_SHADER_FRAGMENT,
/* Unbind. */
c->pipe->set_shader_images(c->pipe, PIPE_SHADER_COMPUTE, 0, 1, NULL);
- c->pipe->set_constant_buffer(c->pipe, PIPE_SHADER_COMPUTE, 0, NULL);
+ c->pipe->set_constant_buffer(c->pipe, PIPE_SHADER_COMPUTE, 0, false, NULL);
c->pipe->set_sampler_views(c->pipe, PIPE_SHADER_FRAGMENT, 0,
num_sampler_views, NULL);
c->pipe->bind_compute_state(c->pipe, NULL);
static void
d3d12_set_constant_buffer(struct pipe_context *pctx,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *buf)
{
struct d3d12_context *ctx = d3d12_context(pctx);
D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT,
buf->user_buffer, &offset, &ctx->cbufs[shader][index].buffer);
- } else
- pipe_resource_reference(&ctx->cbufs[shader][index].buffer, buffer);
+ } else {
+ if (take_ownership) {
+ pipe_resource_reference(&ctx->cbufs[shader][index].buffer, NULL);
+ ctx->cbufs[shader][index].buffer = buffer;
+ } else {
+ pipe_resource_reference(&ctx->cbufs[shader][index].buffer, buffer);
+ }
+ }
ctx->cbufs[shader][index].buffer_offset = offset;
static void
etna_set_constant_buffer(struct pipe_context *pctx,
- enum pipe_shader_type shader, uint index,
+ enum pipe_shader_type shader, uint index, bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct etna_context *ctx = etna_context(pctx);
assert(index < ETNA_MAX_CONST_BUF);
- util_copy_constant_buffer(&so->cb[index], cb);
+ util_copy_constant_buffer(&so->cb[index], cb, take_ownership);
/* Note that the gallium frontends can unbind constant buffers by
* passing NULL here. */
.buffer_size = 16,
.user_buffer = &color->ui,
};
- pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb);
+ pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, false, &cb);
unsigned rs_idx = pfb->samples > 1 ? 1 : 0;
if (!ctx->clear_rs_state[rs_idx]) {
static void
fd_set_constant_buffer(struct pipe_context *pctx,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct fd_context *ctx = fd_context(pctx);
struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];
- util_copy_constant_buffer(&so->cb[index], cb);
+ util_copy_constant_buffer(&so->cb[index], cb, take_ownership);
/* Note that gallium frontends can unbind constant buffers by
* passing NULL here.
static void i915_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct i915_context *i915 = i915_context(pipe);
diff = i915->current.num_user_constants[shader] != 0;
}
- pipe_resource_reference(&i915->constants[shader], buf);
+ if (take_ownership) {
+ pipe_resource_reference(&i915->constants[shader], NULL);
+ i915->constants[shader] = buf;
+ } else {
+ pipe_resource_reference(&i915->constants[shader], buf);
+ }
i915->current.num_user_constants[shader] = new_num;
if (diff)
static void
iris_set_constant_buffer(struct pipe_context *ctx,
enum pipe_shader_type p_stage, unsigned index,
+ bool take_ownership,
const struct pipe_constant_buffer *input)
{
struct iris_context *ice = (struct iris_context *) ctx;
if (!cbuf->buffer) {
/* Allocation was unsuccessful - just unbind */
- iris_set_constant_buffer(ctx, p_stage, index, NULL);
+ iris_set_constant_buffer(ctx, p_stage, index, false, NULL);
return;
}
assert(map);
memcpy(map, input->user_buffer, input->buffer_size);
} else if (input->buffer) {
- pipe_resource_reference(&cbuf->buffer, input->buffer);
+ if (take_ownership) {
+ pipe_resource_reference(&cbuf->buffer, NULL);
+ cbuf->buffer = input->buffer;
+ } else {
+ pipe_resource_reference(&cbuf->buffer, input->buffer);
+ }
cbuf->buffer_offset = input->buffer_offset;
}
static void
lima_set_constant_buffer(struct pipe_context *pctx,
enum pipe_shader_type shader, uint index,
+ bool pass_reference,
const struct pipe_constant_buffer *cb)
{
struct lima_context *ctx = lima_context(pctx);
assert(num <= ARRAY_SIZE(setup->constants));
for (i = 0; i < num; ++i) {
- util_copy_constant_buffer(&setup->constants[i].current, &buffers[i]);
+ util_copy_constant_buffer(&setup->constants[i].current, &buffers[i], false);
}
for (; i < ARRAY_SIZE(setup->constants); i++) {
- util_copy_constant_buffer(&setup->constants[i].current, NULL);
+ util_copy_constant_buffer(&setup->constants[i].current, NULL, false);
}
setup->dirty |= LP_SETUP_NEW_CONSTANTS;
}
assert(num <= ARRAY_SIZE(csctx->constants));
for (i = 0; i < num; ++i) {
- util_copy_constant_buffer(&csctx->constants[i].current, &buffers[i]);
+ util_copy_constant_buffer(&csctx->constants[i].current, &buffers[i], false);
}
for (; i < ARRAY_SIZE(csctx->constants); i++) {
- util_copy_constant_buffer(&csctx->constants[i].current, NULL);
+ util_copy_constant_buffer(&csctx->constants[i].current, NULL, false);
}
}
static void
llvmpipe_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct llvmpipe_context *llvmpipe = llvmpipe_context(pipe);
assert(index < ARRAY_SIZE(llvmpipe->constants[shader]));
/* note: reference counting */
- util_copy_constant_buffer(&llvmpipe->constants[shader][index], cb);
+ util_copy_constant_buffer(&llvmpipe->constants[shader][index], cb,
+ take_ownership);
if (constants) {
if (!(constants->bind & PIPE_BIND_CONSTANT_BUFFER)) {
static void
nv30_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader, uint index,
+ bool pass_reference,
const struct pipe_constant_buffer *cb)
{
struct nv30_context *nv30 = nv30_context(pipe);
size = buf->width0 / (4 * sizeof(float));
if (shader == PIPE_SHADER_VERTEX) {
- pipe_resource_reference(&nv30->vertprog.constbuf, buf);
+ if (pass_reference) {
+ pipe_resource_reference(&nv30->vertprog.constbuf, NULL);
+ nv30->vertprog.constbuf = buf;
+ } else {
+ pipe_resource_reference(&nv30->vertprog.constbuf, buf);
+ }
nv30->vertprog.constbuf_nr = size;
nv30->dirty |= NV30_NEW_VERTCONST;
} else
if (shader == PIPE_SHADER_FRAGMENT) {
- pipe_resource_reference(&nv30->fragprog.constbuf, buf);
+ if (pass_reference) {
+ pipe_resource_reference(&nv30->fragprog.constbuf, NULL);
+ nv30->fragprog.constbuf = buf;
+ } else {
+ pipe_resource_reference(&nv30->fragprog.constbuf, buf);
+ }
nv30->fragprog.constbuf_nr = size;
nv30->dirty |= NV30_NEW_FRAGCONST;
}
static void
nv50_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct nv50_context *nv50 = nv50_context(pipe);
nouveau_bufctx_reset(nv50->bufctx_3d, NV50_BIND_3D_CB(s, i));
nv04_resource(nv50->constbuf[s][i].u.buf)->cb_bindings[s] &= ~(1 << i);
}
- pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res);
+
+ if (take_ownership) {
+ pipe_resource_reference(&nv50->constbuf[s][i].u.buf, NULL);
+ nv50->constbuf[s][i].u.buf = res;
+ } else {
+ pipe_resource_reference(&nv50->constbuf[s][i].u.buf, res);
+ }
nv50->constbuf[s][i].user = (cb && cb->user_buffer) ? true : false;
if (nv50->constbuf[s][i].user) {
static void
nvc0_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct nvc0_context *nvc0 = nvc0_context(pipe);
if (nvc0->constbuf[s][i].u.buf)
nv04_resource(nvc0->constbuf[s][i].u.buf)->cb_bindings[s] &= ~(1 << i);
- pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, res);
+
+ if (take_ownership) {
+ pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, NULL);
+ nvc0->constbuf[s][i].u.buf = res;
+ } else {
+ pipe_resource_reference(&nvc0->constbuf[s][i].u.buf, res);
+ }
nvc0->constbuf[s][i].user = (cb && cb->user_buffer) ? true : false;
if (nvc0->constbuf[s][i].user) {
};
if (info->input)
- pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, &ubuf);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, false, &ubuf);
/* Invoke according to the grid info */
static void
panfrost_set_constant_buffer(
struct pipe_context *pctx,
- enum pipe_shader_type shader, uint index,
+ enum pipe_shader_type shader, uint index, bool take_ownership,
const struct pipe_constant_buffer *buf)
{
struct panfrost_context *ctx = pan_context(pctx);
struct panfrost_constant_buffer *pbuf = &ctx->constant_buffer[shader];
- util_copy_constant_buffer(&pbuf->cb[index], buf);
+ util_copy_constant_buffer(&pbuf->cb[index], buf, take_ownership);
unsigned mask = (1 << index);
static void r300_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct r300_context* r300 = r300_context(pipe);
cb.buffer = buffer;
cb.user_buffer = NULL;
- rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, &cb);
+ rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_COMPUTE, cb_index, false, &cb);
}
/* We need to define these R600 registers here, because we can't include
if (!rctx->tes_shader) {
rctx->lds_alloc = 0;
rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
- R600_LDS_INFO_CONST_BUFFER, NULL);
+ R600_LDS_INFO_CONST_BUFFER, false, NULL);
rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL,
- R600_LDS_INFO_CONST_BUFFER, NULL);
+ R600_LDS_INFO_CONST_BUFFER, false, NULL);
rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
- R600_LDS_INFO_CONST_BUFFER, NULL);
+ R600_LDS_INFO_CONST_BUFFER, false, NULL);
return;
}
constbuf.buffer_size = 8 * 4;
rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
- R600_LDS_INFO_CONST_BUFFER, &constbuf);
+ R600_LDS_INFO_CONST_BUFFER, false, &constbuf);
rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_CTRL,
- R600_LDS_INFO_CONST_BUFFER, &constbuf);
+ R600_LDS_INFO_CONST_BUFFER, false, &constbuf);
rctx->b.b.set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
- R600_LDS_INFO_CONST_BUFFER, &constbuf);
- pipe_resource_reference(&constbuf.buffer, NULL);
+ R600_LDS_INFO_CONST_BUFFER, true, &constbuf);
}
uint32_t evergreen_get_ls_hs_config(struct r600_context *rctx,
if (rctx->append_fence)
pipe_resource_reference((struct pipe_resource**)&rctx->append_fence, NULL);
for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
- rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, NULL);
+ rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, NULL);
free(rctx->driver_consts[sh].constants);
}
for (sh = 0; sh < PIPE_SHADER_TYPES; ++sh)
for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; ++i)
- rctx->b.b.set_constant_buffer(context, sh, i, NULL);
+ rctx->b.b.set_constant_buffer(context, sh, i, false, NULL);
if (rctx->blitter) {
util_blitter_destroy(rctx->blitter);
struct r600_qbo_state *st)
{
rctx->b.bind_compute_state(&rctx->b, st->saved_compute);
-
- rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
- pipe_resource_reference(&st->saved_const0.buffer, NULL);
-
+ rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, true, &st->saved_const0);
rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo, ~0);
for (unsigned i = 0; i < 3; ++i)
pipe_resource_reference(&st->saved_ssbo[i].buffer, NULL);
} else
consts.buffer_offset = 0;
- rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
+ rctx->b.set_constant_buffer(&rctx->b, PIPE_SHADER_COMPUTE, 0, false, &constant_buffer);
rctx->b.set_shader_buffers(&rctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, ~0);
static void r600_set_constant_buffer(struct pipe_context *ctx,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *input)
{
struct r600_context *rctx = (struct r600_context *)ctx;
} else {
/* Setup the hw buffer. */
cb->buffer_offset = input->buffer_offset;
- pipe_resource_reference(&cb->buffer, input->buffer);
+ if (take_ownership) {
+ pipe_resource_reference(&cb->buffer, NULL);
+ cb->buffer = input->buffer;
+ } else {
+ pipe_resource_reference(&cb->buffer, input->buffer);
+ }
r600_context_add_resource_size(ctx, input->buffer);
}
cb.user_buffer = ptr;
cb.buffer_offset = 0;
cb.buffer_size = size;
- rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, &cb);
+ rctx->b.b.set_constant_buffer(&rctx->b.b, sh, R600_BUFFER_INFO_CONST_BUFFER, false, &cb);
pipe_resource_reference(&cb.buffer, NULL);
}
}
if (enable) {
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
- R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.esgs_ring);
+ R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.esgs_ring);
if (rctx->tes_shader) {
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
- R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
+ R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.gsvs_ring);
} else {
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
- R600_GS_RING_CONST_BUFFER, &rctx->gs_rings.gsvs_ring);
+ R600_GS_RING_CONST_BUFFER, false, &rctx->gs_rings.gsvs_ring);
}
} else {
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_GEOMETRY,
- R600_GS_RING_CONST_BUFFER, NULL);
+ R600_GS_RING_CONST_BUFFER, false, NULL);
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_VERTEX,
- R600_GS_RING_CONST_BUFFER, NULL);
+ R600_GS_RING_CONST_BUFFER, false, NULL);
r600_set_constant_buffer(&rctx->b.b, PIPE_SHADER_TESS_EVAL,
- R600_GS_RING_CONST_BUFFER, NULL);
+ R600_GS_RING_CONST_BUFFER, false, NULL);
}
}
}
ssbo[2].buffer_size = 8;
}
- sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
+ sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, false, &constant_buffer);
sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, ssbo, 0x6);
if (wait) {
struct pipe_constant_buffer cb = {};
cb.buffer_size = sizeof(data);
cb.user_buffer = data;
- ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);
+ ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, &cb);
struct pipe_shader_buffer sb = {0};
sb.buffer = dst;
si_launch_grid_internal(sctx, &info, saved_cs, SI_CS_WAIT_FOR_IDLE);
ctx->set_shader_buffers(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_sb, saved_writable_mask);
- ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
+ ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, true, &saved_cb);
pipe_resource_reference(&saved_sb.buffer, NULL);
- pipe_resource_reference(&saved_cb.buffer, NULL);
}
static void si_compute_do_clear_or_copy(struct si_context *sctx, struct pipe_resource *dst,
struct pipe_constant_buffer cb = {};
cb.buffer_size = sizeof(data);
cb.user_buffer = data;
- ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);
+ ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, &cb);
}
struct pipe_image_view image[2] = {0};
for (int i = 0; i < 2; i++)
pipe_resource_reference(&saved_image[i].resource, NULL);
if (!is_dcc_decompress) {
- ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
- pipe_resource_reference(&saved_cb.buffer, NULL);
+ ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, true, &saved_cb);
}
}
struct pipe_constant_buffer cb = {};
cb.buffer_size = sizeof(data);
cb.user_buffer = data;
- ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &cb);
+ ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, false, &cb);
struct pipe_image_view image = {0};
image.resource = dstsurf->texture;
(render_condition_enabled ? SI_CS_RENDER_COND_ENABLE : 0));
ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 1, &saved_image);
- ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, &saved_cb);
+ ctx->set_constant_buffer(ctx, PIPE_SHADER_COMPUTE, 0, true, &saved_cb);
pipe_resource_reference(&saved_image.resource, NULL);
- pipe_resource_reference(&saved_cb.buffer, NULL);
}
}
static void si_set_constant_buffer(struct si_context *sctx, struct si_buffer_resources *buffers,
- unsigned descriptors_idx, uint slot,
+ unsigned descriptors_idx, uint slot, bool take_ownership,
const struct pipe_constant_buffer *input)
{
struct si_descriptors *descs = &sctx->descriptors[descriptors_idx];
input->buffer_size, &buffer_offset);
if (!buffer) {
/* Just unbind on failure. */
- si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, NULL);
+ si_set_constant_buffer(sctx, buffers, descriptors_idx, slot, false, NULL);
return;
}
} else {
- pipe_resource_reference(&buffer, input->buffer);
+ if (take_ownership) {
+ pipe_resource_reference(&buffer, NULL);
+ buffer = input->buffer;
+ } else {
+ pipe_resource_reference(&buffer, input->buffer);
+ }
buffer_offset = input->buffer_offset;
}
}
static void si_pipe_set_constant_buffer(struct pipe_context *ctx, enum pipe_shader_type shader,
- uint slot, const struct pipe_constant_buffer *input)
+ uint slot, bool take_ownership,
+ const struct pipe_constant_buffer *input)
{
struct si_context *sctx = (struct si_context *)ctx;
slot = si_get_constbuf_slot(slot);
si_set_constant_buffer(sctx, &sctx->const_and_shader_buffers[shader],
- si_const_and_shader_buffer_descriptors_idx(shader), slot, input);
+ si_const_and_shader_buffer_descriptors_idx(shader), slot,
+ take_ownership, input);
}
static void si_set_inlinable_constants(struct pipe_context *ctx,
void si_set_internal_const_buffer(struct si_context *sctx, uint slot,
const struct pipe_constant_buffer *input)
{
- si_set_constant_buffer(sctx, &sctx->internal_bindings, SI_DESCS_INTERNAL, slot, input);
+ si_set_constant_buffer(sctx, &sctx->internal_bindings, SI_DESCS_INTERNAL, slot, false, input);
}
void si_set_internal_shader_buffer(struct si_context *sctx, uint slot,
unsigned start_shader = sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
for (shader = start_shader; shader < SI_NUM_SHADERS; shader++) {
for (i = 0; i < SI_NUM_CONST_BUFFERS; i++) {
- sctx->b.set_constant_buffer(&sctx->b, shader, i, &sctx->null_const_buf);
+ sctx->b.set_constant_buffer(&sctx->b, shader, i, false, &sctx->null_const_buf);
}
}
params.start_offset += qbuf->results_end - query->result_size;
}
- sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &constant_buffer);
+ sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, false, &constant_buffer);
ssbo[0].buffer = &qbuf->buf->b.b;
ssbo[0].buffer_offset = params.start_offset;
{
sctx->b.bind_compute_state(&sctx->b, st->saved_compute);
- sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, &st->saved_const0);
- pipe_resource_reference(&st->saved_const0.buffer, NULL);
+ sctx->b.set_constant_buffer(&sctx->b, PIPE_SHADER_COMPUTE, 0, true, &st->saved_const0);
sctx->b.set_shader_buffers(&sctx->b, PIPE_SHADER_COMPUTE, 0, 3, st->saved_ssbo,
st->saved_ssbo_writable_mask);
static void
softpipe_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct softpipe_context *softpipe = softpipe_context(pipe);
draw_flush(softpipe->draw);
/* note: reference counting */
- pipe_resource_reference(&softpipe->constants[shader][index], constants);
+ if (take_ownership) {
+ pipe_resource_reference(&softpipe->constants[shader][index], NULL);
+ softpipe->constants[shader][index] = constants;
+ } else {
+ pipe_resource_reference(&softpipe->constants[shader][index], constants);
+ }
if (shader == PIPE_SHADER_VERTEX || shader == PIPE_SHADER_GEOMETRY) {
draw_set_mapped_constant_buffer(softpipe->draw, shader, index, data, size);
static void
svga_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct svga_screen *svgascreen = svga_screen(pipe->screen);
assert(index < svgascreen->max_const_buffers);
(void) svgascreen;
- pipe_resource_reference(&svga->curr.constbufs[shader][index].buffer, buf);
+ if (take_ownership) {
+ pipe_resource_reference(&svga->curr.constbufs[shader][index].buffer, NULL);
+ svga->curr.constbufs[shader][index].buffer = buf;
+ } else {
+ pipe_resource_reference(&svga->curr.constbufs[shader][index].buffer, buf);
+ }
/* Make sure the constant buffer size to be updated is within the
* limit supported by the device.
cb.user_buffer = (void *) svga->curr.default_tesslevels;
cb.buffer_offset = 0;
cb.buffer_size = 2 * 4 * sizeof(float);
- svga->pipe.set_constant_buffer(&svga->pipe, PIPE_SHADER_TESS_CTRL, 0, &cb);
+ svga->pipe.set_constant_buffer(&svga->pipe, PIPE_SHADER_TESS_CTRL, 0, false, &cb);
}
static void
swr_set_constant_buffer(struct pipe_context *pipe,
enum pipe_shader_type shader,
- uint index,
+ uint index, bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct swr_context *ctx = swr_context(pipe);
assert(index < ARRAY_SIZE(ctx->constants[shader]));
/* note: reference counting */
- util_copy_constant_buffer(&ctx->constants[shader][index], cb);
+ util_copy_constant_buffer(&ctx->constants[shader][index], cb, take_ownership);
if (shader == PIPE_SHADER_VERTEX) {
ctx->dirty |= SWR_NEW_VSCONSTANTS;
static void
tegra_set_constant_buffer(struct pipe_context *pcontext, unsigned int shader,
- unsigned int index,
+ unsigned int index, bool take_ownership,
const struct pipe_constant_buffer *buf)
{
struct tegra_context *context = to_tegra_context(pcontext);
buf = &buffer;
}
- context->gpu->set_constant_buffer(context->gpu, shader, index, buf);
+ context->gpu->set_constant_buffer(context->gpu, shader, index, take_ownership, buf);
}
static void
static void
v3d_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct v3d_context *v3d = v3d_context(pctx);
struct v3d_constbuf_stateobj *so = &v3d->constbuf[shader];
- util_copy_constant_buffer(&so->cb[index], cb);
+ util_copy_constant_buffer(&so->cb[index], cb, take_ownership);
/* Note that the gallium frontend can unbind constant buffers by
* passing NULL here.
.user_buffer = &stride,
.buffer_size = sizeof(stride),
};
- pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb_uniforms);
+ pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, false, &cb_uniforms);
struct pipe_constant_buffer cb_src = {
.buffer = info->src.resource,
.buffer_offset = src->slices[info->src.level].offset,
.buffer_size = (src->bo->size -
src->slices[info->src.level].offset),
};
- pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, &cb_src);
+ pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, false, &cb_src);
/* Unbind the textures, to make sure we don't try to recurse into the
* shadow blit.
util_blitter_restore_constant_buffer_state(vc4->blitter);
/* Restore cb1 (util_blitter doesn't handle this one). */
struct pipe_constant_buffer cb_disabled = { 0 };
- pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, &cb_disabled);
+ pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, false, &cb_disabled);
pipe_surface_reference(&dst_surf, NULL);
static void
vc4_set_constant_buffer(struct pipe_context *pctx,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct vc4_context *vc4 = vc4_context(pctx);
if (index == 1 && so->cb[index].buffer_size != cb->buffer_size)
vc4->dirty |= VC4_DIRTY_UBO_1_SIZE;
- pipe_resource_reference(&so->cb[index].buffer, cb->buffer);
- so->cb[index].buffer_offset = cb->buffer_offset;
- so->cb[index].buffer_size = cb->buffer_size;
- so->cb[index].user_buffer = cb->user_buffer;
+ util_copy_constant_buffer(&so->cb[index], cb, take_ownership);
so->enabled_mask |= 1 << index;
so->dirty_mask |= 1 << index;
static void virgl_set_constant_buffer(struct pipe_context *ctx,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *buf)
{
struct virgl_context *vctx = virgl_context(ctx);
buf->buffer_offset,
buf->buffer_size, res);
- pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
+ if (take_ownership) {
+ pipe_resource_reference(&binding->ubos[index].buffer, NULL);
+ binding->ubos[index].buffer = buf->buffer;
+ } else {
+ pipe_resource_reference(&binding->ubos[index].buffer, buf->buffer);
+ }
binding->ubos[index] = *buf;
binding->ubo_enabled_mask |= 1 << index;
} else {
static void
zink_set_constant_buffer(struct pipe_context *pctx,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *cb)
{
struct zink_context *ctx = zink_context(pctx);
cb->user_buffer, &offset, &buffer);
}
- pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
+ if (take_ownership) {
+ pipe_resource_reference(&ctx->ubos[shader][index].buffer, NULL);
+ ctx->ubos[shader][index].buffer = buffer;
+ } else {
+ pipe_resource_reference(&ctx->ubos[shader][index].buffer, buffer);
+ }
ctx->ubos[shader][index].buffer_offset = offset;
ctx->ubos[shader][index].buffer_size = cb->buffer_size;
ctx->ubos[shader][index].user_buffer = NULL;
if (state->pcbuf_dirty[PIPE_SHADER_COMPUTE]) {
state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
- 0, &state->pc_buffer[PIPE_SHADER_COMPUTE]);
+ 0, false, &state->pc_buffer[PIPE_SHADER_COMPUTE]);
state->pcbuf_dirty[PIPE_SHADER_COMPUTE] = false;
}
if (state->constbuf_dirty[PIPE_SHADER_COMPUTE]) {
for (unsigned i = 0; i < state->num_const_bufs[PIPE_SHADER_COMPUTE]; i++)
state->pctx->set_constant_buffer(state->pctx, PIPE_SHADER_COMPUTE,
- i + 1, &state->const_buffer[PIPE_SHADER_COMPUTE][i]);
+ i + 1, false, &state->const_buffer[PIPE_SHADER_COMPUTE][i]);
state->constbuf_dirty[PIPE_SHADER_COMPUTE] = false;
}
if (state->constbuf_dirty[sh]) {
for (unsigned idx = 0; idx < state->num_const_bufs[sh]; idx++)
state->pctx->set_constant_buffer(state->pctx, sh,
- idx + 1, &state->const_buffer[sh][idx]);
+ idx + 1, false, &state->const_buffer[sh][idx]);
}
state->constbuf_dirty[sh] = false;
}
for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
if (state->pcbuf_dirty[sh]) {
state->pctx->set_constant_buffer(state->pctx, sh,
- 0, &state->pc_buffer[sh]);
+ 0, false, &state->pc_buffer[sh]);
}
}
struct pipe_context *pipe = context->pipe;
if (unlikely(!context->programmable_vs))
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &context->pipe_data.cb_vs_ff);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &context->pipe_data.cb_vs_ff);
else {
if (context->swvp) {
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &context->pipe_data.cb0_swvp);
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 1, &context->pipe_data.cb1_swvp);
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 2, &context->pipe_data.cb2_swvp);
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 3, &context->pipe_data.cb3_swvp);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &context->pipe_data.cb0_swvp);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 1, false, &context->pipe_data.cb1_swvp);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 2, false, &context->pipe_data.cb2_swvp);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 3, false, &context->pipe_data.cb3_swvp);
} else {
- pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, &context->pipe_data.cb_vs);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &context->pipe_data.cb_vs);
}
}
}
struct pipe_context *pipe = context->pipe;
if (unlikely(!context->ps))
- pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &context->pipe_data.cb_ps_ff);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, false, &context->pipe_data.cb_ps_ff);
else
- pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &context->pipe_data.cb_ps);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, false, &context->pipe_data.cb_ps);
}
static inline void
buf = cb.user_buffer;
- pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 0, &cb);
+ pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 0, false, &cb);
if (cb.buffer)
pipe_resource_reference(&cb.buffer, NULL);
cb.user_buffer = (char *)buf + 4096 * sizeof(float[4]);
- pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 1, &cb);
+ pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 1, false, &cb);
if (cb.buffer)
pipe_resource_reference(&cb.buffer, NULL);
}
cb.buffer_size = 2048 * sizeof(float[4]);
cb.user_buffer = state->vs_const_i;
- pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 2, &cb);
+ pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 2, false, &cb);
if (cb.buffer)
pipe_resource_reference(&cb.buffer, NULL);
}
cb.buffer_size = 512 * sizeof(float[4]);
cb.user_buffer = state->vs_const_b;
- pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 3, &cb);
+ pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 3, false, &cb);
if (cb.buffer)
pipe_resource_reference(&cb.buffer, NULL);
}
cb.user_buffer = NULL;
}
- pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 4, &cb);
+ pipe_sw->set_constant_buffer(pipe_sw, PIPE_SHADER_VERTEX, 4, false, &cb);
if (cb.buffer)
pipe_resource_reference(&cb.buffer, NULL);
}
cb.buffer_size = sizeof(constants);
cb.user_buffer = constants;
- pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, &cb);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, false, &cb);
/* Use the optimal block size for the linear image layout. */
struct pipe_grid_info info = {};
/* Unbind. */
pipe->set_shader_images(pipe, PIPE_SHADER_COMPUTE, 0, 3, NULL);
- pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, NULL);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_COMPUTE, 0, false, NULL);
pipe->bind_compute_state(pipe, NULL);
} else {
/* Graphics path */
void (*set_clip_state)( struct pipe_context *,
const struct pipe_clip_state * );
+ /**
+ * Set constant buffer
+ *
+ * \param shader Shader stage
+ * \param index Buffer binding slot index within a shader stage
+ * \param take_ownership Whether the callee takes ownership of the buffer
+ * reference (if true, the callee must not add a new reference)
+ * \param buf Constant buffer parameters
+ */
void (*set_constant_buffer)( struct pipe_context *,
enum pipe_shader_type shader, uint index,
+ bool take_ownership,
const struct pipe_constant_buffer *buf );
/**
cb1.user_buffer = constants1;
ctx->set_constant_buffer(ctx,
- PIPE_SHADER_FRAGMENT, 0,
+ PIPE_SHADER_FRAGMENT, 0, false,
&cb1);
memset(&cb2, 0, sizeof cb2);
cb2.user_buffer = constants2;
ctx->set_constant_buffer(ctx,
- PIPE_SHADER_FRAGMENT, 1,
+ PIPE_SHADER_FRAGMENT, 1, false,
&cb2);
}
_mesa_upload_state_parameters(st->ctx, params, ptr);
u_upload_unmap(pipe->const_uploader);
- pipe->set_constant_buffer(pipe, shader_type, 0, &cb);
- pipe_resource_reference(&cb.buffer, NULL);
+ pipe->set_constant_buffer(pipe, shader_type, 0, true, &cb);
/* Set inlinable constants. This is more involved because state
* parameters are uploaded directly above instead of being loaded
if (params->StateFlags)
_mesa_load_state_parameters(st->ctx, params);
- pipe->set_constant_buffer(pipe, shader_type, 0, &cb);
+ pipe->set_constant_buffer(pipe, shader_type, 0, false, &cb);
/* Set inlinable constants. */
unsigned num_inlinable_uniforms = prog->info.num_inlinable_uniforms;
/* Unbind. */
struct pipe_context *pipe = st->pipe;
- pipe->set_constant_buffer(pipe, shader_type, 0, NULL);
+ pipe->set_constant_buffer(pipe, shader_type, 0, false, NULL);
st->state.constbuf0_enabled_shader_mask &= ~(1 << shader_type);
}
}
cb.buffer_size = 0;
}
- pipe->set_constant_buffer(pipe, shader_type, 1 + i, &cb);
+ pipe->set_constant_buffer(pipe, shader_type, 1 + i, false, &cb);
}
}
cb.buffer_offset = 0;
cb.buffer_size = sizeof(addr->constants);
- pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, &cb);
+ pipe->set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, false, &cb);
pipe_resource_reference(&cb.buffer, NULL);
}