void
cso_set_vertex_buffers(struct cso_context *ctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_trailing_count,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
return;
if (vbuf) {
- u_vbuf_set_vertex_buffers(vbuf, start_slot, count, unbind_trailing_count,
+ u_vbuf_set_vertex_buffers(vbuf, count, unbind_trailing_count,
take_ownership, buffers);
return;
}
struct pipe_context *pipe = ctx->base.pipe;
- pipe->set_vertex_buffers(pipe, start_slot, count, unbind_trailing_count,
+ pipe->set_vertex_buffers(pipe, count, unbind_trailing_count,
take_ownership, buffers);
}
/* Unbind all buffers in cso_context, because we'll use u_vbuf. */
unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
if (unbind_vb_count)
- pipe->set_vertex_buffers(pipe, 0, 0, unbind_vb_count, false, NULL);
+ pipe->set_vertex_buffers(pipe, 0, unbind_vb_count, false, NULL);
/* Unset this to make sure the CSO is re-bound on the next use. */
ctx->velements = NULL;
}
if (vb_count || unbind_trailing_vb_count) {
- u_vbuf_set_vertex_buffers(vbuf, 0, vb_count,
+ u_vbuf_set_vertex_buffers(vbuf, vb_count,
unbind_trailing_vb_count,
take_ownership, vbuffers);
}
/* Unbind all buffers in u_vbuf, because we'll use cso_context. */
unsigned unbind_vb_count = vb_count + unbind_trailing_vb_count;
if (unbind_vb_count)
- u_vbuf_set_vertex_buffers(vbuf, 0, 0, unbind_vb_count, false, NULL);
+ u_vbuf_set_vertex_buffers(vbuf, 0, unbind_vb_count, false, NULL);
/* Unset this to make sure the CSO is re-bound on the next use. */
u_vbuf_unset_vertex_elements(vbuf);
}
if (vb_count || unbind_trailing_vb_count) {
- pipe->set_vertex_buffers(pipe, 0, vb_count, unbind_trailing_vb_count,
+ pipe->set_vertex_buffers(pipe, vb_count, unbind_trailing_vb_count,
take_ownership, vbuffers);
}
cso_set_vertex_elements_direct(ctx, velems);
if (state_mask & CSO_BIT_VERTEX_ELEMENTS)
cso_restore_vertex_elements(cso);
if (unbind & CSO_UNBIND_VERTEX_BUFFER0)
- cso->base.pipe->set_vertex_buffers(cso->base.pipe, 0, 0, 1, false, NULL);
+ cso->base.pipe->set_vertex_buffers(cso->base.pipe, 0, 1, false, NULL);
if (state_mask & CSO_BIT_STREAM_OUTPUTS)
cso_restore_stream_outputs(cso);
if (state_mask & CSO_BIT_PAUSE_QUERIES)
const struct cso_velems_state *velems);
void cso_set_vertex_buffers(struct cso_context *ctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_trailing_count,
bool take_ownership,
const struct pipe_vertex_buffer *buffers);
void
draw_set_vertex_buffers(struct draw_context *draw,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
const struct pipe_vertex_buffer *buffers)
{
- assert(start_slot + count <= PIPE_MAX_ATTRIBS);
+ assert(count <= PIPE_MAX_ATTRIBS);
util_set_vertex_buffers_count(draw->pt.vertex_buffer,
&draw->pt.nr_vertex_buffers,
- buffers, start_slot, count,
+ buffers, count,
unbind_num_trailing_slots, false);
}
*/
void draw_set_vertex_buffers(struct draw_context *draw,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
const struct pipe_vertex_buffer *buffers);
static void
dd_context_set_vertex_buffers(struct pipe_context *_pipe,
- unsigned start, unsigned num_buffers,
+ unsigned num_buffers,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
struct dd_context *dctx = dd_context(_pipe);
struct pipe_context *pipe = dctx->pipe;
- safe_memcpy(&dctx->draw_state.vertex_buffers[start], buffers,
+ safe_memcpy(&dctx->draw_state.vertex_buffers[0], buffers,
sizeof(buffers[0]) * num_buffers);
- safe_memcpy(&dctx->draw_state.vertex_buffers[start + num_buffers], NULL,
+ safe_memcpy(&dctx->draw_state.vertex_buffers[num_buffers], NULL,
sizeof(buffers[0]) * unbind_num_trailing_slots);
- pipe->set_vertex_buffers(pipe, start, num_buffers,
+ pipe->set_vertex_buffers(pipe, num_buffers,
unbind_num_trailing_slots, take_ownership,
buffers);
}
}
static void noop_set_vertex_buffers(struct pipe_context *ctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
static void
trace_context_set_vertex_buffers(struct pipe_context *_pipe,
- unsigned start_slot, unsigned num_buffers,
+ unsigned num_buffers,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
trace_dump_call_begin("pipe_context", "set_vertex_buffers");
trace_dump_arg(ptr, pipe);
- trace_dump_arg(uint, start_slot);
trace_dump_arg(uint, num_buffers);
trace_dump_arg(uint, unbind_num_trailing_slots);
trace_dump_arg(bool, take_ownership);
trace_dump_struct_array(vertex_buffer, buffers, num_buffers);
trace_dump_arg_end();
- pipe->set_vertex_buffers(pipe, start_slot, num_buffers,
+ pipe->set_vertex_buffers(pipe, num_buffers,
unbind_num_trailing_slots, take_ownership,
buffers);
u_upload_unmap(hud->pipe->stream_uploader);
vbuffer.stride = 2 * sizeof(float);
- cso_set_vertex_buffers(cso, 0, 1, 0, false, &vbuffer);
+ cso_set_vertex_buffers(cso, 1, 0, false, &vbuffer);
pipe_resource_reference(&vbuffer.buffer.resource, NULL);
cso_set_fragment_shader_handle(hud->cso, hud->fs_color);
cso_draw_arrays(cso, prim, 0, num_vertices);
pipe->set_constant_buffer(pipe, PIPE_SHADER_VERTEX, 0, false, &hud->constbuf);
- cso_set_vertex_buffers(cso, 0, 1, 0, false, &hud->bg.vbuf);
+ cso_set_vertex_buffers(cso, 1, 0, false, &hud->bg.vbuf);
cso_draw_arrays(cso, MESA_PRIM_QUADS, 0, hud->bg.num_vertices);
}
pipe_resource_reference(&hud->bg.vbuf.buffer.resource, NULL);
/* draw accumulated vertices for text */
if (hud->text.num_vertices) {
cso_set_vertex_shader_handle(cso, hud->vs_text);
- cso_set_vertex_buffers(cso, 0, 1, 0, false, &hud->text.vbuf);
+ cso_set_vertex_buffers(cso, 1, 0, false, &hud->text.vbuf);
cso_set_fragment_shader_handle(hud->cso, hud->fs_text);
cso_draw_arrays(cso, MESA_PRIM_QUADS, 0, hud->text.num_vertices);
}
if (hud->whitelines.num_vertices) {
cso_set_vertex_shader_handle(cso, hud->vs_color);
- cso_set_vertex_buffers(cso, 0, 1, 0, false, &hud->whitelines.vbuf);
+ cso_set_vertex_buffers(cso, 1, 0, false, &hud->whitelines.vbuf);
cso_set_fragment_shader_handle(hud->cso, hud->fs_color);
cso_draw_arrays(cso, MESA_PRIM_LINES, 0, hud->whitelines.num_vertices);
}
void
pp_filter_draw(struct pp_program *p)
{
- util_draw_vertex_buffer(p->pipe, p->cso, p->vbuf, 0, 0,
+ util_draw_vertex_buffer(p->pipe, p->cso, p->vbuf, 0,
MESA_PRIM_QUADS, 4, 2);
}
}
ctx->base.cb_slot = 0; /* 0 for now */
- ctx->base.vb_slot = 0; /* 0 for now */
/* vertex elements states */
memset(&velem[0], 0, sizeof(velem[0]) * 2);
for (i = 0; i < 2; i++) {
velem[i].src_offset = i * 4 * sizeof(float);
velem[i].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
- velem[i].vertex_buffer_index = ctx->base.vb_slot;
+ velem[i].vertex_buffer_index = 0;
}
ctx->velem_state = pipe->create_vertex_elements_state(pipe, 2, &velem[0]);
for (i = 0; i < 4; i++) {
velem[0].src_format = formats[i];
- velem[0].vertex_buffer_index = ctx->base.vb_slot;
+ velem[0].vertex_buffer_index = 0;
ctx->velem_state_readbuf[i] =
pipe->create_vertex_elements_state(pipe, 1, &velem[0]);
}
/* Vertex buffer. */
if (ctx->base.saved_vertex_buffer.buffer.resource) {
- pipe->set_vertex_buffers(pipe, ctx->base.vb_slot, 1, 0, true,
+ pipe->set_vertex_buffers(pipe, 1, 0, true,
&ctx->base.saved_vertex_buffer);
ctx->base.saved_vertex_buffer.buffer.resource = NULL;
}
return;
u_upload_unmap(pipe->stream_uploader);
- pipe->set_vertex_buffers(pipe, ctx->base.vb_slot, 1, 0, false, &vb);
+ pipe->set_vertex_buffers(pipe, 1, 0, false, &vb);
pipe->bind_vertex_elements_state(pipe, vertex_elements_cso);
pipe->bind_vs_state(pipe, get_vs(&ctx->base));
blitter_check_saved_vertex_states(ctx);
blitter_disable_render_cond(ctx);
- pipe->set_vertex_buffers(pipe, ctx->base.vb_slot, 1, 0, false, &vb);
+ pipe->set_vertex_buffers(pipe, 1, 0, false, &vb);
pipe->bind_vertex_elements_state(pipe,
ctx->velem_state_readbuf[num_channels-1]);
bind_vs_pos_only(ctx, num_channels);
unsigned cb_slot;
struct pipe_constant_buffer saved_fs_constant_buffer;
- unsigned vb_slot;
struct pipe_vertex_buffer saved_vertex_buffer;
unsigned saved_num_so_targets;
struct pipe_vertex_buffer *vertex_buffers)
{
pipe_vertex_buffer_reference(&blitter->saved_vertex_buffer,
- &vertex_buffers[blitter->vb_slot]);
+ &vertex_buffers[0]);
}
static inline void
util_draw_vertex_buffer(struct pipe_context *pipe,
struct cso_context *cso,
struct pipe_resource *vbuf,
- unsigned vbuf_slot,
unsigned offset,
enum mesa_prim prim_type,
unsigned num_verts,
/* note: vertex elements already set by caller */
if (cso) {
- cso_set_vertex_buffers(cso, vbuf_slot, 1, 0, false, &vbuffer);
+ cso_set_vertex_buffers(cso, 1, 0, false, &vbuffer);
cso_draw_arrays(cso, prim_type, 0, num_verts);
} else {
- pipe->set_vertex_buffers(pipe, vbuf_slot, 1, 0, false, &vbuffer);
+ pipe->set_vertex_buffers(pipe, 1, 0, false, &vbuffer);
util_draw_arrays(pipe, prim_type, 0, num_verts);
}
}
/* note: vertex elements already set by caller */
- cso_set_vertex_buffers(cso, 0, 1, 0, false, &vbuffer);
+ cso_set_vertex_buffers(cso, 1, 0, false, &vbuffer);
cso_draw_arrays(cso, prim_type, 0, num_verts);
}
struct pipe_resource;
struct cso_context;
-extern void
+extern void
util_draw_vertex_buffer(struct pipe_context *pipe, struct cso_context *cso,
- struct pipe_resource *vbuf, unsigned vbuf_slot,
- unsigned offset, enum mesa_prim prim_type,
- unsigned num_attribs, unsigned num_verts);
+ struct pipe_resource *vbuf, unsigned offset,
+ enum mesa_prim prim_type, unsigned num_attribs,
+ unsigned num_verts);
void
util_draw_user_vertex_buffer(struct cso_context *cso, void *buffer,
void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
uint32_t *enabled_buffers,
const struct pipe_vertex_buffer *src,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership)
{
unsigned i;
uint32_t bitmask = 0;
- dst += start_slot;
-
- *enabled_buffers &= ~u_bit_consecutive(start_slot, count);
+ *enabled_buffers &= ~BITFIELD_MASK(count);
if (src) {
for (i = 0; i < count; i++) {
/* Copy over the other members of pipe_vertex_buffer. */
memcpy(dst, src, count * sizeof(struct pipe_vertex_buffer));
- *enabled_buffers |= bitmask << start_slot;
+ *enabled_buffers |= bitmask;
}
else {
/* Unreference the buffers. */
void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
unsigned *dst_count,
const struct pipe_vertex_buffer *src,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership)
{
enabled_buffers |= (1ull << i);
}
- util_set_vertex_buffers_mask(dst, &enabled_buffers, src, start_slot,
+ util_set_vertex_buffers_mask(dst, &enabled_buffers, src,
count, unbind_num_trailing_slots,
take_ownership);
void util_set_vertex_buffers_mask(struct pipe_vertex_buffer *dst,
uint32_t *enabled_buffers,
const struct pipe_vertex_buffer *src,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership);
void util_set_vertex_buffers_count(struct pipe_vertex_buffer *dst,
unsigned *dst_count,
const struct pipe_vertex_buffer *src,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership);
struct tc_vertex_buffers {
struct tc_call_base base;
- uint8_t start, count;
+ uint8_t count;
uint8_t unbind_num_trailing_slots;
struct pipe_vertex_buffer slot[0]; /* more will be allocated if needed */
};
unsigned count = p->count;
if (!count) {
- pipe->set_vertex_buffers(pipe, p->start, 0,
- p->unbind_num_trailing_slots, false, NULL);
+ pipe->set_vertex_buffers(pipe, 0, p->unbind_num_trailing_slots, false, NULL);
return call_size(tc_vertex_buffers);
}
for (unsigned i = 0; i < count; i++)
tc_assert(!p->slot[i].is_user_buffer);
- pipe->set_vertex_buffers(pipe, p->start, count,
- p->unbind_num_trailing_slots, true, p->slot);
+ pipe->set_vertex_buffers(pipe, count, p->unbind_num_trailing_slots, true, p->slot);
return p->base.num_slots;
}
static void
tc_set_vertex_buffers(struct pipe_context *_pipe,
- unsigned start, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
if (count && buffers) {
struct tc_vertex_buffers *p =
tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, count);
- p->start = start;
p->count = count;
p->unbind_num_trailing_slots = unbind_num_trailing_slots;
struct pipe_resource *buf = buffers[i].buffer.resource;
if (buf) {
- tc_bind_buffer(tc, &tc->vertex_buffers[start + i], next, buf);
+ tc_bind_buffer(tc, &tc->vertex_buffers[i], next, buf);
} else {
- tc_unbind_buffer(&tc->vertex_buffers[start + i]);
+ tc_unbind_buffer(&tc->vertex_buffers[i]);
}
}
} else {
dst->buffer_offset = src->buffer_offset;
if (buf) {
- tc_bind_buffer(tc, &tc->vertex_buffers[start + i], next, buf);
+ tc_bind_buffer(tc, &tc->vertex_buffers[i], next, buf);
} else {
- tc_unbind_buffer(&tc->vertex_buffers[start + i]);
+ tc_unbind_buffer(&tc->vertex_buffers[i]);
}
}
}
- tc_unbind_buffers(&tc->vertex_buffers[start + count],
+ tc_unbind_buffers(&tc->vertex_buffers[count],
unbind_num_trailing_slots);
} else {
struct tc_vertex_buffers *p =
tc_add_slot_based_call(tc, TC_CALL_set_vertex_buffers, tc_vertex_buffers, 0);
- p->start = start;
p->count = 0;
p->unbind_num_trailing_slots = count + unbind_num_trailing_slots;
- tc_unbind_buffers(&tc->vertex_buffers[start],
+ tc_unbind_buffers(&tc->vertex_buffers[0],
count + unbind_num_trailing_slots);
}
}
const unsigned num_vb = screen->get_shader_param(screen, PIPE_SHADER_VERTEX,
PIPE_SHADER_CAP_MAX_INPUTS);
- mgr->pipe->set_vertex_buffers(mgr->pipe, 0, 0, num_vb, false, NULL);
+ mgr->pipe->set_vertex_buffers(mgr->pipe, 0, num_vb, false, NULL);
for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[i]);
}
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *bufs)
uint32_t nonzero_stride_vb_mask = 0;
/* which buffers are unaligned to 2/4 bytes */
uint32_t unaligned_vb_mask[2] = {0};
- uint32_t mask =
- ~(((1ull << (count + unbind_num_trailing_slots)) - 1) << start_slot);
+ uint32_t mask = ~BITFIELD64_MASK(count + unbind_num_trailing_slots);
if (!bufs) {
struct pipe_context *pipe = mgr->pipe;
mgr->unaligned_vb_mask[1] &= mask;
for (i = 0; i < total_count; i++) {
- unsigned dst_index = start_slot + i;
+ unsigned dst_index = i;
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[dst_index]);
}
- pipe->set_vertex_buffers(pipe, start_slot, count,
- unbind_num_trailing_slots, false, NULL);
+ pipe->set_vertex_buffers(pipe, count, unbind_num_trailing_slots, false, NULL);
return;
}
for (i = 0; i < count; i++) {
- unsigned dst_index = start_slot + i;
+ unsigned dst_index = i;
const struct pipe_vertex_buffer *vb = &bufs[i];
struct pipe_vertex_buffer *orig_vb = &mgr->vertex_buffer[dst_index];
struct pipe_vertex_buffer *real_vb = &mgr->real_vertex_buffer[dst_index];
}
for (i = 0; i < unbind_num_trailing_slots; i++) {
- unsigned dst_index = start_slot + count + i;
+ unsigned dst_index = count + i;
pipe_vertex_buffer_unreference(&mgr->vertex_buffer[dst_index]);
pipe_vertex_buffer_unreference(&mgr->real_vertex_buffer[dst_index]);
static void u_vbuf_set_driver_vertex_buffers(struct u_vbuf *mgr)
{
struct pipe_context *pipe = mgr->pipe;
- unsigned start_slot, count;
-
- start_slot = ffs(mgr->dirty_real_vb_mask) - 1;
- count = util_last_bit(mgr->dirty_real_vb_mask >> start_slot);
+ unsigned count = util_last_bit(mgr->dirty_real_vb_mask);
if (mgr->dirty_real_vb_mask == mgr->enabled_vb_mask &&
mgr->dirty_real_vb_mask == mgr->user_vb_mask) {
* to skip atomic reference counting there. These are freshly uploaded
* user buffers that can be discarded after this call.
*/
- pipe->set_vertex_buffers(pipe, start_slot, count, 0, true,
- mgr->real_vertex_buffer + start_slot);
+ pipe->set_vertex_buffers(pipe, count, 0, true, mgr->real_vertex_buffer);
/* We don't own the VBO references now. Set them to NULL. */
for (unsigned i = 0; i < count; i++) {
- assert(!mgr->real_vertex_buffer[start_slot + i].is_user_buffer);
- mgr->real_vertex_buffer[start_slot + i].buffer.resource = NULL;
+ assert(!mgr->real_vertex_buffer[i].is_user_buffer);
+ mgr->real_vertex_buffer[i].buffer.resource = NULL;
}
} else {
/* Slow path where we have to keep VBO references. */
- pipe->set_vertex_buffers(pipe, start_slot, count, 0, false,
- mgr->real_vertex_buffer + start_slot);
+ pipe->set_vertex_buffers(pipe, count, 0, false, mgr->real_vertex_buffer);
}
mgr->dirty_real_vb_mask = 0;
}
const struct cso_velems_state *velems);
void u_vbuf_unset_vertex_elements(struct u_vbuf *mgr);
void u_vbuf_set_vertex_buffers(struct u_vbuf *mgr,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *bufs);
filter->pipe->bind_fs_state(filter->pipe, filter->fs);
filter->pipe->set_framebuffer_state(filter->pipe, &fb_state);
filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport);
- filter->pipe->set_vertex_buffers(filter->pipe, 0, 1, 0, false, &filter->quad);
+ filter->pipe->set_vertex_buffers(filter->pipe, 1, 0, false, &filter->quad);
filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves);
util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4);
c->pipe->set_framebuffer_state(c->pipe, &c->fb_state);
c->pipe->bind_vs_state(c->pipe, c->vs);
- c->pipe->set_vertex_buffers(c->pipe, 0, 1, 0, false, &c->vertex_buf);
+ c->pipe->set_vertex_buffers(c->pipe, 1, 0, false, &c->vertex_buf);
c->pipe->bind_vertex_elements_state(c->pipe, c->vertex_elems_state);
pipe_set_constant_buffer(c->pipe, PIPE_SHADER_FRAGMENT, 0, s->shader_params);
c->pipe->bind_rasterizer_state(c->pipe, c->rast);
/* set up pipe state */
filter->pipe->bind_rasterizer_state(filter->pipe, filter->rs_state);
- filter->pipe->set_vertex_buffers(filter->pipe, 0, 1, 0, false, &filter->quad);
+ filter->pipe->set_vertex_buffers(filter->pipe, 1, 0, false, &filter->quad);
filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves);
filter->pipe->bind_vs_state(filter->pipe, filter->vs);
filter->pipe->bind_sampler_states(filter->pipe, PIPE_SHADER_FRAGMENT,
filter->pipe->bind_fs_state(filter->pipe, filter->fs);
filter->pipe->set_framebuffer_state(filter->pipe, &fb_state);
filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport);
- filter->pipe->set_vertex_buffers(filter->pipe, 0, 1, 0, false, &filter->quad);
+ filter->pipe->set_vertex_buffers(filter->pipe, 1, 0, false, &filter->quad);
filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves);
util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4);
filter->pipe->bind_fs_state(filter->pipe, filter->fs);
filter->pipe->set_framebuffer_state(filter->pipe, &fb_state);
filter->pipe->set_viewport_states(filter->pipe, 0, 1, &viewport);
- filter->pipe->set_vertex_buffers(filter->pipe, 0, 1, 0, false, &filter->quad);
+ filter->pipe->set_vertex_buffers(filter->pipe, 1, 0, false, &filter->quad);
filter->pipe->bind_vertex_elements_state(filter->pipe, filter->ves);
util_draw_arrays(filter->pipe, MESA_PRIM_QUADS, 0, 4);
if (!ref_frames[j] || !ref_frames[j][i]) continue;
vb[2] = vl_vb_get_mv(&buf->vertex_stream, j);
- dec->context->set_vertex_buffers(dec->context, 0, 3, 0, false, vb);
+ dec->context->set_vertex_buffers(dec->context, 3, 0, false, vb);
vl_mc_render_ref(i ? &dec->mc_c : &dec->mc_y, &buf->mc[i], ref_frames[j][i]);
}
if (!buf->num_ycbcr_blocks[i]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, i);
- dec->context->set_vertex_buffers(dec->context, 0, 2, 0, false, vb);
+ dec->context->set_vertex_buffers(dec->context, 2, 0, false, vb);
vl_zscan_render(i ? &dec->zscan_c : & dec->zscan_y, &buf->zscan[i] , buf->num_ycbcr_blocks[i]);
if (!buf->num_ycbcr_blocks[plane]) continue;
vb[1] = vl_vb_get_ycbcr(&buf->vertex_stream, plane);
- dec->context->set_vertex_buffers(dec->context, 0, 2, 0, false, vb);
+ dec->context->set_vertex_buffers(dec->context, 2, 0, false, vb);
if (dec->base.entrypoint <= PIPE_VIDEO_ENTRYPOINT_IDCT)
vl_idct_prepare_stage2(i ? &dec->idct_c : &dec->idct_y, &buf->idct[plane]);
/* BOs added to the batch in the uniform upload path */
static void
-agx_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
- unsigned count, unsigned unbind_num_trailing_slots,
- bool take_ownership,
+agx_set_vertex_buffers(struct pipe_context *pctx, unsigned count,
+ unsigned unbind_num_trailing_slots, bool take_ownership,
const struct pipe_vertex_buffer *buffers)
{
struct agx_context *ctx = agx_context(pctx);
util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers,
- start_slot, count, unbind_num_trailing_slots,
+ count, unbind_num_trailing_slots,
take_ownership);
ctx->dirty |= AGX_DIRTY_VERTEX;
*/
static void
crocus_set_vertex_buffers(struct pipe_context *ctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
const unsigned padding =
(GFX_VERx10 < 75 && screen->devinfo.platform != INTEL_PLATFORM_BYT) * 2;
ice->state.bound_vertex_buffers &=
- ~u_bit_consecutive64(start_slot, count + unbind_num_trailing_slots);
+ ~u_bit_consecutive64(0, count + unbind_num_trailing_slots);
util_set_vertex_buffers_mask(ice->state.vertex_buffers, &ice->state.bound_vertex_buffers,
- buffers, start_slot, count, unbind_num_trailing_slots,
+ buffers, count, unbind_num_trailing_slots,
take_ownership);
for (unsigned i = 0; i < count; i++) {
struct pipe_vertex_buffer *state =
- &ice->state.vertex_buffers[start_slot + i];
+ &ice->state.vertex_buffers[i];
if (!state->is_user_buffer && state->buffer.resource) {
struct crocus_resource *res = (void *)state->buffer.resource;
uint32_t end = 0;
if (state->buffer.resource)
end = state->buffer.resource->width0 + padding;
- ice->state.vb_end[start_slot + i] = end;
+ ice->state.vb_end[i] = end;
}
ice->state.dirty |= CROCUS_DIRTY_VERTEX_BUFFERS;
}
static void
d3d12_set_vertex_buffers(struct pipe_context *pctx,
- unsigned start_slot,
unsigned num_buffers,
unsigned unbind_num_trailing_slots,
bool take_ownership,
{
struct d3d12_context *ctx = d3d12_context(pctx);
util_set_vertex_buffers_count(ctx->vbs, &ctx->num_vbs,
- buffers, start_slot, num_buffers,
+ buffers, num_buffers,
unbind_num_trailing_slots,
take_ownership);
}
static void
-etna_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
- unsigned num_buffers, unsigned unbind_num_trailing_slots, bool take_ownership,
+etna_set_vertex_buffers(struct pipe_context *pctx, unsigned num_buffers,
+ unsigned unbind_num_trailing_slots, bool take_ownership,
const struct pipe_vertex_buffer *vb)
{
struct etna_context *ctx = etna_context(pctx);
struct etna_vertexbuf_state *so = &ctx->vertex_buffer;
- util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot,
+ util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
num_buffers, unbind_num_trailing_slots,
take_ownership);
so->count = util_last_bit(so->enabled_mask);
- for (unsigned idx = start_slot; idx < start_slot + num_buffers; ++idx) {
+ for (unsigned idx = 0; idx < num_buffers; ++idx) {
struct compiled_set_vertex_buffer *cs = &so->cvb[idx];
struct pipe_vertex_buffer *vbi = &so->vb[idx];
pctx->set_viewport_states(pctx, 0, 1, &vp);
pctx->bind_vertex_elements_state(pctx, ctx->solid_vbuf_state.vtx);
- pctx->set_vertex_buffers(pctx, blitter->vb_slot, 1, 0, false,
+ pctx->set_vertex_buffers(pctx, 1, 0, false,
&ctx->solid_vbuf_state.vertexbuf.vb[0]);
pctx->set_stream_output_targets(pctx, 0, NULL, NULL);
}
static void
-fd_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
+fd_set_vertex_buffers(struct pipe_context *pctx,
unsigned count, unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb) in_dt
if (ctx->screen->gen < 3) {
for (i = 0; i < count; i++) {
bool new_enabled = vb && vb[i].buffer.resource;
- bool old_enabled = so->vb[start_slot + i].buffer.resource != NULL;
+ bool old_enabled = so->vb[i].buffer.resource != NULL;
uint32_t new_stride = vb ? vb[i].stride : 0;
- uint32_t old_stride = so->vb[start_slot + i].stride;
+ uint32_t old_stride = so->vb[i].stride;
if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
fd_context_dirty(ctx, FD_DIRTY_VTXSTATE);
break;
}
}
- util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot,
+ util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
count, unbind_num_trailing_slots,
take_ownership);
so->count = util_last_bit(so->enabled_mask);
*/
if (vb[i].buffer.resource &&
unlikely(vb[i].buffer_offset >= vb[i].buffer.resource->width0)) {
- so->vb[start_slot + i].buffer_offset = 0;
+ so->vb[i].buffer_offset = 0;
}
}
}
}
static void
-i915_set_vertex_buffers(struct pipe_context *pipe, unsigned start_slot,
+i915_set_vertex_buffers(struct pipe_context *pipe,
unsigned count, unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
struct draw_context *draw = i915->draw;
util_set_vertex_buffers_count(i915->vertex_buffers, &i915->nr_vertex_buffers,
- buffers, start_slot, count,
+ buffers, count,
unbind_num_trailing_slots, take_ownership);
/* pass-through to draw module */
- draw_set_vertex_buffers(draw, start_slot, count, unbind_num_trailing_slots,
+ draw_set_vertex_buffers(draw, count, unbind_num_trailing_slots,
buffers);
}
*/
static void
iris_set_vertex_buffers(struct pipe_context *ctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
struct iris_genx_state *genx = ice->state.genx;
ice->state.bound_vertex_buffers &=
- ~u_bit_consecutive64(start_slot, count + unbind_num_trailing_slots);
+ ~u_bit_consecutive64(0, count + unbind_num_trailing_slots);
for (unsigned i = 0; i < count; i++) {
const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
struct iris_vertex_buffer_state *state =
- &genx->vertex_buffers[start_slot + i];
+ &genx->vertex_buffers[i];
if (!buffer) {
pipe_resource_reference(&state->resource, NULL);
state->offset = (int) buffer->buffer_offset;
if (res) {
- ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
+ ice->state.bound_vertex_buffers |= 1ull << i;
res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
}
iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
- vb.VertexBufferIndex = start_slot + i;
+ vb.VertexBufferIndex = i;
vb.AddressModifyEnable = true;
vb.BufferPitch = buffer->stride;
if (res) {
for (unsigned i = 0; i < unbind_num_trailing_slots; i++) {
struct iris_vertex_buffer_state *state =
- &genx->vertex_buffers[start_slot + count + i];
+ &genx->vertex_buffers[count + i];
pipe_resource_reference(&state->resource, NULL);
}
static void
lima_set_vertex_buffers(struct pipe_context *pctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb)
struct lima_context_vertex_buffer *so = &ctx->vertex_buffers;
util_set_vertex_buffers_mask(so->vb, &so->enabled_mask,
- vb, start_slot, count,
+ vb, count,
unbind_num_trailing_slots,
take_ownership);
so->count = util_last_bit(so->enabled_mask);
struct lima_context_vertex_buffer *so = &ctx->vertex_buffers;
util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, NULL,
- 0, 0, ARRAY_SIZE(so->vb), false);
+ 0, ARRAY_SIZE(so->vb), false);
}
static void
llvmpipe_set_vertex_buffers(struct pipe_context *pipe,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
util_set_vertex_buffers_count(llvmpipe->vertex_buffer,
&llvmpipe->num_vertex_buffers,
- buffers, start_slot, count,
+ buffers, count,
unbind_num_trailing_slots,
take_ownership);
llvmpipe->dirty |= LP_NEW_VERTEX;
- draw_set_vertex_buffers(llvmpipe->draw, start_slot, count,
+ draw_set_vertex_buffers(llvmpipe->draw, count,
unbind_num_trailing_slots, buffers);
}
if (nv30->draw_dirty & NV30_NEW_CLIP)
draw_set_clip_state(draw, &nv30->clip);
if (nv30->draw_dirty & NV30_NEW_ARRAYS) {
- draw_set_vertex_buffers(draw, 0, nv30->num_vtxbufs, 0, nv30->vtxbuf);
+ draw_set_vertex_buffers(draw, nv30->num_vtxbufs, 0, nv30->vtxbuf);
draw_set_vertex_elements(draw, nv30->vertex->num_elements, nv30->vertex->pipe);
}
if (nv30->draw_dirty & NV30_NEW_FRAGPROG) {
static void
nv30_set_vertex_buffers(struct pipe_context *pipe,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb)
nouveau_bufctx_reset(nv30->bufctx, BUFCTX_VTXBUF);
util_set_vertex_buffers_count(nv30->vtxbuf, &nv30->num_vtxbufs,
- vb, start_slot, count,
+ vb, count,
unbind_num_trailing_slots,
take_ownership);
static void
nv50_set_vertex_buffers(struct pipe_context *pipe,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb)
nv50->dirty_3d |= NV50_NEW_3D_ARRAYS;
util_set_vertex_buffers_count(nv50->vtxbuf, &nv50->num_vtxbufs, vb,
- start_slot, count,
- unbind_num_trailing_slots,
+ count, unbind_num_trailing_slots,
take_ownership);
- unsigned clear_mask = ~u_bit_consecutive(start_slot + count, unbind_num_trailing_slots);
+ unsigned clear_mask = ~u_bit_consecutive(count, unbind_num_trailing_slots);
nv50->vbo_user &= clear_mask;
nv50->vbo_constant &= clear_mask;
nv50->vtxbufs_coherent &= clear_mask;
if (!vb) {
- clear_mask = ~u_bit_consecutive(start_slot, count);
+ clear_mask = ~u_bit_consecutive(0, count);
nv50->vbo_user &= clear_mask;
nv50->vbo_constant &= clear_mask;
nv50->vtxbufs_coherent &= clear_mask;
}
for (i = 0; i < count; ++i) {
- unsigned dst_index = start_slot + i;
+ unsigned dst_index = i;
if (vb[i].is_user_buffer) {
nv50->vbo_user |= 1 << dst_index;
static void
nvc0_set_vertex_buffers(struct pipe_context *pipe,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb)
nvc0->dirty_3d |= NVC0_NEW_3D_ARRAYS;
util_set_vertex_buffers_count(nvc0->vtxbuf, &nvc0->num_vtxbufs, vb,
- start_slot, count,
- unbind_num_trailing_slots,
+ count, unbind_num_trailing_slots,
take_ownership);
- unsigned clear_mask = ~u_bit_consecutive(start_slot + count, unbind_num_trailing_slots);
+ unsigned clear_mask = ~u_bit_consecutive(count, unbind_num_trailing_slots);
nvc0->vbo_user &= clear_mask;
nvc0->constant_vbos &= clear_mask;
nvc0->vtxbufs_coherent &= clear_mask;
if (!vb) {
- clear_mask = ~u_bit_consecutive(start_slot, count);
+ clear_mask = ~u_bit_consecutive(0, count);
nvc0->vbo_user &= clear_mask;
nvc0->constant_vbos &= clear_mask;
nvc0->vtxbufs_coherent &= clear_mask;
}
for (i = 0; i < count; ++i) {
- unsigned dst_index = start_slot + i;
+ unsigned dst_index = i;
if (vb[i].is_user_buffer) {
nvc0->vbo_user |= 1 << dst_index;
}
static void
-panfrost_set_vertex_buffers(struct pipe_context *pctx, unsigned start_slot,
- unsigned num_buffers,
+panfrost_set_vertex_buffers(struct pipe_context *pctx, unsigned num_buffers,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
struct panfrost_context *ctx = pan_context(pctx);
util_set_vertex_buffers_mask(ctx->vertex_buffers, &ctx->vb_mask, buffers,
- start_slot, num_buffers,
- unbind_num_trailing_slots, take_ownership);
+ num_buffers, unbind_num_trailing_slots,
+ take_ownership);
ctx->dirty |= PAN_DIRTY_VERTEX;
}
vb.depth0 = 1;
r300->dummy_vb.buffer.resource = screen->resource_create(screen, &vb);
- r300->context.set_vertex_buffers(&r300->context, 0, 1, 0, false, &r300->dummy_vb);
+ r300->context.set_vertex_buffers(&r300->context, 1, 0, false, &r300->dummy_vb);
}
{
}
static void r300_set_vertex_buffers_hwtcl(struct pipe_context* pipe,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer* buffers)
util_set_vertex_buffers_count(r300->vertex_buffer,
&r300->nr_vertex_buffers,
- buffers, start_slot, count,
+ buffers, count,
unbind_num_trailing_slots, take_ownership);
/* There must be at least one vertex buffer set, otherwise it locks up. */
if (!r300->nr_vertex_buffers) {
util_set_vertex_buffers_count(r300->vertex_buffer,
&r300->nr_vertex_buffers,
- &r300->dummy_vb, 0, 1, 0, false);
+ &r300->dummy_vb, 1, 0, false);
}
r300->vertex_arrays_dirty = true;
}
static void r300_set_vertex_buffers_swtcl(struct pipe_context* pipe,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer* buffers)
util_set_vertex_buffers_count(r300->vertex_buffer,
&r300->nr_vertex_buffers,
- buffers, start_slot, count,
+ buffers, count,
unbind_num_trailing_slots, take_ownership);
- draw_set_vertex_buffers(r300->draw, start_slot, count,
+ draw_set_vertex_buffers(r300->draw, count,
unbind_num_trailing_slots, buffers);
if (!buffers)
for (i = 0; i < count; i++) {
if (buffers[i].is_user_buffer) {
- draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
+ draw_set_mapped_vertex_buffer(r300->draw, i,
buffers[i].buffer.user, ~0);
} else if (buffers[i].buffer.resource) {
- draw_set_mapped_vertex_buffer(r300->draw, start_slot + i,
+ draw_set_mapped_vertex_buffer(r300->draw, i,
r300_resource(buffers[i].buffer.resource)->malloced_buffer, ~0);
}
}
vbuffer.stride = 2 * 4 * sizeof(float); /* vertex size */
vbuffer.buffer_offset = offset;
- rctx->b.set_vertex_buffers(&rctx->b, blitter->vb_slot, 1, 0, false, &vbuffer);
+ rctx->b.set_vertex_buffers(&rctx->b, 1, 0, false, &vbuffer);
util_draw_arrays_instanced(&rctx->b, R600_PRIM_RECTANGLE_LIST, 0, 3,
0, num_instances);
pipe_resource_reference(&buf, NULL);
}
static void r600_set_vertex_buffers(struct pipe_context *ctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *input)
{
struct r600_context *rctx = (struct r600_context *)ctx;
struct r600_vertexbuf_state *state = &rctx->vertex_buffer_state;
- struct pipe_vertex_buffer *vb = state->vb + start_slot;
+ struct pipe_vertex_buffer *vb = state->vb;
unsigned i;
uint32_t disable_mask = 0;
/* These are the new buffers set by this function. */
}
disable_mask |= ((1ull << unbind_num_trailing_slots) - 1) << count;
- disable_mask <<= start_slot;
- new_buffer_mask <<= start_slot;
-
rctx->vertex_buffer_state.enabled_mask &= ~disable_mask;
rctx->vertex_buffer_state.dirty_mask &= rctx->vertex_buffer_state.enabled_mask;
rctx->vertex_buffer_state.enabled_mask |= new_buffer_mask;
FREE(state);
}
-static void si_set_vertex_buffers(struct pipe_context *ctx, unsigned start_slot, unsigned count,
+static void si_set_vertex_buffers(struct pipe_context *ctx, unsigned count,
unsigned unbind_num_trailing_slots, bool take_ownership,
const struct pipe_vertex_buffer *buffers)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct pipe_vertex_buffer *dst = sctx->vertex_buffer + start_slot;
- unsigned updated_mask = u_bit_consecutive(start_slot, count + unbind_num_trailing_slots);
+ unsigned updated_mask = u_bit_consecutive(0, count + unbind_num_trailing_slots);
uint32_t orig_unaligned = sctx->vertex_buffer_unaligned;
uint32_t unaligned = 0;
int i;
- assert(start_slot + count + unbind_num_trailing_slots <= ARRAY_SIZE(sctx->vertex_buffer));
+ assert(count + unbind_num_trailing_slots <= ARRAY_SIZE(sctx->vertex_buffer));
if (buffers) {
if (take_ownership) {
for (i = 0; i < count; i++) {
const struct pipe_vertex_buffer *src = buffers + i;
- struct pipe_vertex_buffer *dsti = dst + i;
+ struct pipe_vertex_buffer *dst = sctx->vertex_buffer + i;
struct pipe_resource *buf = src->buffer.resource;
- unsigned slot_bit = 1 << (start_slot + i);
+ unsigned slot_bit = 1 << i;
/* Only unreference bound vertex buffers. (take_ownership) */
- pipe_resource_reference(&dsti->buffer.resource, NULL);
+ pipe_resource_reference(&dst->buffer.resource, NULL);
if (src->buffer_offset & 3 || src->stride & 3)
unaligned |= slot_bit;
}
}
/* take_ownership allows us to copy pipe_resource pointers without refcounting. */
- memcpy(dst, buffers, count * sizeof(struct pipe_vertex_buffer));
+ memcpy(sctx->vertex_buffer, buffers, count * sizeof(struct pipe_vertex_buffer));
} else {
for (i = 0; i < count; i++) {
const struct pipe_vertex_buffer *src = buffers + i;
- struct pipe_vertex_buffer *dsti = dst + i;
+ struct pipe_vertex_buffer *dst = sctx->vertex_buffer + i;
struct pipe_resource *buf = src->buffer.resource;
- unsigned slot_bit = 1 << (start_slot + i);
+ unsigned slot_bit = 1 << i;
- pipe_resource_reference(&dsti->buffer.resource, buf);
- dsti->buffer_offset = src->buffer_offset;
- dsti->stride = src->stride;
+ pipe_resource_reference(&dst->buffer.resource, buf);
+ dst->buffer_offset = src->buffer_offset;
+ dst->stride = src->stride;
- if (dsti->buffer_offset & 3 || dsti->stride & 3)
+ if (dst->buffer_offset & 3 || dst->stride & 3)
unaligned |= slot_bit;
if (buf) {
}
} else {
for (i = 0; i < count; i++)
- pipe_resource_reference(&dst[i].buffer.resource, NULL);
+ pipe_resource_reference(&sctx->vertex_buffer[i].buffer.resource, NULL);
}
for (i = 0; i < unbind_num_trailing_slots; i++)
- pipe_resource_reference(&dst[count + i].buffer.resource, NULL);
+ pipe_resource_reference(&sctx->vertex_buffer[count + i].buffer.resource, NULL);
sctx->vertex_buffers_dirty = sctx->num_vertex_elements > 0;
sctx->vertex_buffer_unaligned = (orig_unaligned & ~updated_mask) | unaligned;
static void
softpipe_set_vertex_buffers(struct pipe_context *pipe,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
util_set_vertex_buffers_count(softpipe->vertex_buffer,
&softpipe->num_vertex_buffers,
- buffers, start_slot, count,
+ buffers, count,
unbind_num_trailing_slots,
take_ownership);
softpipe->dirty |= SP_NEW_VERTEX;
- draw_set_vertex_buffers(softpipe->draw, start_slot, count,
- unbind_num_trailing_slots, buffers);
+ draw_set_vertex_buffers(softpipe->draw, count, unbind_num_trailing_slots, buffers);
}
static void
svga_set_vertex_buffers(struct pipe_context *pipe,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
util_set_vertex_buffers_count(svga->curr.vb,
&svga->curr.num_vertex_buffers,
- buffers, start_slot, count,
+ buffers, count,
unbind_num_trailing_slots,
take_ownership);
svga->curr.fs->draw_shader);
if (dirty & SVGA_NEW_VBUFFER)
- draw_set_vertex_buffers(svga->swtnl.draw, 0,
+ draw_set_vertex_buffers(svga->swtnl.draw,
svga->curr.num_vertex_buffers, 0,
svga->curr.vb);
}
static void
-tegra_set_vertex_buffers(struct pipe_context *pcontext, unsigned start_slot,
+tegra_set_vertex_buffers(struct pipe_context *pcontext,
unsigned num_buffers, unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *buffers)
buffers = buf;
}
- context->gpu->set_vertex_buffers(context->gpu, start_slot, num_buffers,
+ context->gpu->set_vertex_buffers(context->gpu, num_buffers,
unbind_num_trailing_slots,
take_ownership, buffers);
}
static void
v3d_set_vertex_buffers(struct pipe_context *pctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb)
struct v3d_vertexbuf_stateobj *so = &v3d->vertexbuf;
util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
- start_slot, count,
- unbind_num_trailing_slots,
+ count, unbind_num_trailing_slots,
take_ownership);
so->count = util_last_bit(so->enabled_mask);
static void
vc4_set_vertex_buffers(struct pipe_context *pctx,
- unsigned start_slot, unsigned count,
+ unsigned count,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const struct pipe_vertex_buffer *vb)
struct vc4_vertexbuf_stateobj *so = &vc4->vertexbuf;
util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb,
- start_slot, count,
- unbind_num_trailing_slots,
+ count, unbind_num_trailing_slots,
take_ownership);
so->count = util_last_bit(so->enabled_mask);
}
static void virgl_set_vertex_buffers(struct pipe_context *ctx,
- unsigned start_slot,
unsigned num_buffers,
unsigned unbind_num_trailing_slots,
bool take_ownership,
util_set_vertex_buffers_count(vctx->vertex_buffer,
&vctx->num_vertex_buffers,
- buffers, start_slot, num_buffers,
+ buffers, num_buffers,
unbind_num_trailing_slots,
take_ownership);
static void
zink_set_vertex_buffers(struct pipe_context *pctx,
- unsigned start_slot,
unsigned num_buffers,
unsigned unbind_num_trailing_slots,
bool take_ownership,
const bool need_state_change = !zink_screen(pctx->screen)->info.have_EXT_extended_dynamic_state &&
!have_input_state;
uint32_t enabled_buffers = ctx->gfx_pipeline_state.vertex_buffers_enabled_mask;
- enabled_buffers |= u_bit_consecutive(start_slot, num_buffers);
- enabled_buffers &= ~u_bit_consecutive(start_slot + num_buffers, unbind_num_trailing_slots);
+ enabled_buffers |= u_bit_consecutive(0, num_buffers);
+ enabled_buffers &= ~u_bit_consecutive(num_buffers, unbind_num_trailing_slots);
bool stride_changed = false;
if (buffers) {
for (unsigned i = 0; i < num_buffers; ++i) {
const struct pipe_vertex_buffer *vb = buffers + i;
- struct pipe_vertex_buffer *ctx_vb = &ctx->vertex_buffers[start_slot + i];
+ struct pipe_vertex_buffer *ctx_vb = &ctx->vertex_buffers[i];
stride_changed |= ctx_vb->stride != vb->stride;
- update_existing_vbo(ctx, start_slot + i);
+ update_existing_vbo(ctx, i);
if (!take_ownership)
pipe_resource_reference(&ctx_vb->buffer.resource, vb->buffer.resource);
else {
}
if (vb->buffer.resource) {
struct zink_resource *res = zink_resource(vb->buffer.resource);
- res->vbo_bind_mask |= BITFIELD_BIT(start_slot + i);
+ res->vbo_bind_mask |= BITFIELD_BIT(i);
res->vbo_bind_count++;
res->gfx_barrier |= VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
res->barrier_access[0] |= VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
zink_batch_resource_usage_set(&ctx->batch, res, false, true);
res->obj->unordered_read = false;
} else {
- enabled_buffers &= ~BITFIELD_BIT(start_slot + i);
+ enabled_buffers &= ~BITFIELD_BIT(i);
}
}
} else {
for (unsigned i = 0; i < num_buffers; ++i) {
- update_existing_vbo(ctx, start_slot + i);
- pipe_resource_reference(&ctx->vertex_buffers[start_slot + i].buffer.resource, NULL);
+ update_existing_vbo(ctx, i);
+ pipe_resource_reference(&ctx->vertex_buffers[i].buffer.resource, NULL);
}
}
for (unsigned i = 0; i < unbind_num_trailing_slots; i++) {
- update_existing_vbo(ctx, start_slot + i);
- pipe_resource_reference(&ctx->vertex_buffers[start_slot + i].buffer.resource, NULL);
+ update_existing_vbo(ctx, i);
+ pipe_resource_reference(&ctx->vertex_buffers[i].buffer.resource, NULL);
}
if (need_state_change)
ctx->vertex_state_changed = true;
/* Resubmit old and new vertex buffers.
*/
- pipe->set_vertex_buffers(pipe, 0, PIPE_MAX_ATTRIBS, 0, false, pDevice->vertex_buffers);
+ pipe->set_vertex_buffers(pipe, PIPE_MAX_ATTRIBS, 0, false, pDevice->vertex_buffers);
}
unsigned start_vb;
struct pipe_vertex_buffer vb[PIPE_MAX_ATTRIBS];
size_t vb_sizes[PIPE_MAX_ATTRIBS]; //UINT32_MAX for unset
+ uint8_t vertex_buffer_index[PIPE_MAX_ATTRIBS]; /* temp storage to sort for start_vb */
struct cso_velems_state velem;
bool disable_multisample;
}
}
+/* Remap each cached vertex element's vertex_buffer_index to be relative to
+ * start_vb (by subtracting it), so the indices line up with the compacted
+ * buffer array that cso_set_vertex_buffers() now binds starting at slot 0.
+ * Called from emit_state() before cso_set_vertex_elements(). */
+static void update_vertex_elements_buffer_index(struct rendering_state *state)
+{
+ for (int i = 0; i < state->velem.count; i++)
+ state->velem.velems[i].vertex_buffer_index = state->vertex_buffer_index[i] - state->start_vb;
+}
+
static void emit_state(struct rendering_state *state)
{
if (!state->shaders[MESA_SHADER_FRAGMENT] && !state->noop_fs_bound) {
}
if (state->vb_dirty) {
- cso_set_vertex_buffers(state->cso, state->start_vb, state->num_vb, 0, false, state->vb);
+ cso_set_vertex_buffers(state->cso, state->num_vb, 0, false, state->vb);
state->vb_dirty = false;
}
if (state->ve_dirty) {
+ update_vertex_elements_buffer_index(state);
cso_set_vertex_elements(state->cso, &state->velem);
state->ve_dirty = false;
}
u_foreach_bit(a, ps->vi->attributes_valid) {
uint32_t b = ps->vi->attributes[a].binding;
state->velem.velems[a].src_offset = ps->vi->attributes[a].offset;
- state->velem.velems[a].vertex_buffer_index = b;
+ state->vertex_buffer_index[a] = b;
state->velem.velems[a].src_format =
lvp_vk_format_to_pipe_format(ps->vi->attributes[a].format);
state->velem.velems[a].dual_slot = false;
}
assert(binding);
state->velem.velems[location].src_offset = attrs[i].offset;
- state->velem.velems[location].vertex_buffer_index = attrs[i].binding;
+ state->vertex_buffer_index[location] = attrs[i].binding;
state->velem.velems[location].src_format = lvp_vk_format_to_pipe_format(attrs[i].format);
state->vb[attrs[i].binding].stride = binding->stride;
uint32_t d = binding->divisor;
assert(transfer);
memset(data, 0, 16);
This->context.pipe->buffer_unmap(This->context.pipe, transfer);
+
+ /* initialize dummy_vbo_sw */
+ if (pScreen != This->screen_sw) {
+
+ This->dummy_vbo_sw = This->screen_sw->resource_create(This->screen_sw, &tmpl);
+ if (!This->dummy_vbo_sw)
+ return D3DERR_OUTOFVIDEOMEMORY;
+
+ u_box_1d(0, 16, &box);
+ data = This->pipe_sw->buffer_map(This->pipe_sw, This->dummy_vbo_sw, 0,
+ PIPE_MAP_WRITE |
+ PIPE_MAP_DISCARD_WHOLE_RESOURCE,
+ &box, &transfer);
+ assert(data);
+ assert(transfer);
+ memset(data, 0, 16);
+ This->pipe_sw->buffer_unmap(This->pipe_sw, transfer);
+ } else {
+ This->dummy_vbo_sw = This->dummy_vbo;
+ }
}
This->cursor.software = false;
pipe_sampler_view_reference(&This->dummy_sampler_view, NULL);
pipe_resource_reference(&This->dummy_texture, NULL);
pipe_resource_reference(&This->dummy_vbo, NULL);
+ if (This->screen != This->screen_sw)
+ pipe_resource_reference(&This->dummy_vbo_sw, NULL);
FREE(This->state.vs_const_f);
FREE(This->context.vs_const_f);
FREE(This->state.ps_const_f);
/* dummy vbo (containing 0 0 0 0) to bind if vertex shader input
* is not bound to anything by the vertex declaration */
struct pipe_resource *dummy_vbo;
+ struct pipe_resource *dummy_vbo_sw;
BOOL device_needs_reset;
int minor_version_num;
long long available_texture_mem;
int dummy_vbo_stream = -1;
BOOL need_dummy_vbo = false;
struct cso_velems_state ve;
+ unsigned vtxbuf_mask;
+ unsigned vtxbuf_holes_map[PIPE_MAX_ATTRIBS];
context->stream_usage_mask = 0;
memset(vdecl_index_map, -1, 16);
if (need_dummy_vbo) {
u_foreach_bit(bit, BITFIELD_MASK(device->caps.MaxStreams) & ~used_streams) {
- dummy_vbo_stream = bit;
- break;
+ dummy_vbo_stream = bit;
+ break;
}
}
- /* there are less vertex shader inputs than stream slots,
+ /* there are fewer vertex shader inputs than stream slots,
* so if we need a slot for the dummy vbo, we should have found one */
assert (!need_dummy_vbo || dummy_vbo_stream != -1);
+ /* calculate vtxbuf_holes_map to match the slots bound by update_vertex_buffers() */
+ i = 0;
+ vtxbuf_mask = context->vtxbuf_mask |
+ (need_dummy_vbo ? BITFIELD_BIT(dummy_vbo_stream) : 0);
+ u_foreach_bit(bit, vtxbuf_mask)
+ vtxbuf_holes_map[bit] = i++;
+
for (n = 0; n < vs->num_inputs; ++n) {
index = vdecl_index_map[n];
if (index >= 0) {
ve.velems[n] = vdecl->elems[index];
+ ve.velems[n].vertex_buffer_index =
+ vtxbuf_holes_map[ve.velems[n].vertex_buffer_index];
b = ve.velems[n].vertex_buffer_index;
context->stream_usage_mask |= 1 << b;
/* XXX wine just uses 1 here: */
* vertex shader needs, we bind a dummy vbo with 0 0 0 0.
* This is not precised by the spec, but is the behaviour
* tested on win */
- ve.velems[n].vertex_buffer_index = dummy_vbo_stream;
+ ve.velems[n].vertex_buffer_index = vtxbuf_holes_map[dummy_vbo_stream];
ve.velems[n].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
ve.velems[n].src_offset = 0;
ve.velems[n].instance_divisor = 0;
if (context->dummy_vbo_bound_at != dummy_vbo_stream) {
if (context->dummy_vbo_bound_at >= 0)
context->changed.vtxbuf |= 1 << context->dummy_vbo_bound_at;
- if (dummy_vbo_stream >= 0) {
+ if (dummy_vbo_stream >= 0)
context->changed.vtxbuf |= 1 << dummy_vbo_stream;
- context->vbo_bound_done = false;
- }
context->dummy_vbo_bound_at = dummy_vbo_stream;
}
{
struct nine_context *context = &device->context;
struct pipe_context *pipe = context->pipe;
- struct pipe_vertex_buffer dummy_vtxbuf;
- uint32_t mask = context->changed.vtxbuf;
- unsigned i;
+ struct pipe_vertex_buffer vbuffer[PIPE_MAX_ATTRIBS];
+ unsigned vtxbuf_count;
+ unsigned trailing_count;
+ unsigned mask, i, vtxbuf_i;
- DBG("mask=%x\n", mask);
+ mask = context->vtxbuf_mask |
+ ((context->dummy_vbo_bound_at >= 0) ? BITFIELD_BIT(context->dummy_vbo_bound_at) : 0);
+ vtxbuf_count = util_bitcount(mask);
- if (context->dummy_vbo_bound_at >= 0) {
- if (!context->vbo_bound_done) {
- dummy_vtxbuf.buffer.resource = device->dummy_vbo;
- dummy_vtxbuf.stride = 0;
- dummy_vtxbuf.is_user_buffer = false;
- dummy_vtxbuf.buffer_offset = 0;
- pipe->set_vertex_buffers(pipe, context->dummy_vbo_bound_at,
- 1, 0, false, &dummy_vtxbuf);
- context->vbo_bound_done = true;
+ DBG("mask=%x\n", mask);
+ for (i = 0; mask; i++) {
+ vtxbuf_i = u_bit_scan(&mask);
+ if (vtxbuf_i == context->dummy_vbo_bound_at) {
+ vbuffer[i].buffer.resource = device->dummy_vbo;
+ vbuffer[i].stride = 0;
+ vbuffer[i].is_user_buffer = false;
+ vbuffer[i].buffer_offset = 0;
+ } else {
+ memcpy(&vbuffer[i], &context->vtxbuf[vtxbuf_i], sizeof(struct pipe_vertex_buffer));
}
- mask &= ~(1 << context->dummy_vbo_bound_at);
}
- for (i = 0; mask; mask >>= 1, ++i) {
- if (mask & 1) {
- if (context->vtxbuf[i].buffer.resource)
- pipe->set_vertex_buffers(pipe, i, 1, 0, false, &context->vtxbuf[i]);
- else
- pipe->set_vertex_buffers(pipe, i, 0, 1, false, NULL);
- }
- }
+ trailing_count = (context->last_vtxbuf_count <= vtxbuf_count) ? 0 :
+ context->last_vtxbuf_count - vtxbuf_count;
+ if (vtxbuf_count)
+ pipe->set_vertex_buffers(pipe, vtxbuf_count, trailing_count, false, vbuffer);
+ else
+ pipe->set_vertex_buffers(pipe, 0, trailing_count, false, NULL);
+ context->last_vtxbuf_count = vtxbuf_count;
context->changed.vtxbuf = 0;
}
pipe_resource_reference(&context->vtxbuf[i].buffer.resource, res);
context->changed.vtxbuf |= 1 << StreamNumber;
+ if (res)
+ context->vtxbuf_mask |= 1 << StreamNumber;
+ else
+ context->vtxbuf_mask &= ~(1 << StreamNumber);
}
void
else
info.index.user = user_ibuf;
- context->pipe->set_vertex_buffers(context->pipe, 0, 1, 0, false, vbuf);
+ context->pipe->set_vertex_buffers(context->pipe, 1, 0, false, vbuf);
context->changed.vtxbuf |= 1;
context->pipe->draw_vbo(context->pipe, &info, 0, NULL, &draw, 1);
for (s = 0; s < NINE_MAX_SAMPLERS; ++s)
context->changed.sampler[s] = ~0;
- if (!is_reset) {
+ if (!is_reset)
context->dummy_vbo_bound_at = -1;
- context->vbo_bound_done = false;
- }
}
void
if (!pipe || !cso)
return;
+ context->vtxbuf_mask = 0;
+
pipe->bind_vs_state(pipe, NULL);
pipe->bind_fs_state(pipe, NULL);
pipe->set_sampler_views(pipe, PIPE_SHADER_FRAGMENT, 0, 0,
NINE_MAX_SAMPLERS_PS, false, NULL);
- pipe->set_vertex_buffers(pipe, 0, 0, device->caps.MaxStreams, false, NULL);
+ pipe->set_vertex_buffers(pipe, 0, device->caps.MaxStreams, false, NULL);
for (i = 0; i < ARRAY_SIZE(context->rt); ++i)
nine_bind(&context->rt[i], NULL);
/* There is duplication with update_vertex_elements.
* TODO: Share the code */
-static void
+static int
update_vertex_elements_sw(struct NineDevice9 *device)
{
struct nine_state *state = &device->state;
const struct NineVertexDeclaration9 *vdecl = device->state.vdecl;
const struct NineVertexShader9 *vs;
- unsigned n, b, i;
+ unsigned n, b, i, j;
int index;
int8_t vdecl_index_map[16]; /* vs->num_inputs <= 16 */
int8_t used_streams[device->caps.MaxStreams];
- int dummy_vbo_stream = -1;
BOOL need_dummy_vbo = false;
+ int dummy_vbo_stream = -1;
struct cso_velems_state ve;
bool programmable_vs = state->vs && !(state->vdecl && state->vdecl->position_t);
+ unsigned vtxbuf_holes_map[PIPE_MAX_ATTRIBS];
memset(vdecl_index_map, -1, 16);
memset(used_streams, 0, device->caps.MaxStreams);
}
if (need_dummy_vbo) {
- for (i = 0; i < device->caps.MaxStreams; i++ ) {
+ for (i = 0; i < device->caps.MaxStreams; i++) {
if (!used_streams[i]) {
dummy_vbo_stream = i;
break;
}
}
}
- /* TODO handle dummy_vbo */
- assert (!need_dummy_vbo);
+ /* there are fewer vertex shader inputs than stream slots,
+ * so if we need a slot for the dummy vbo, we should have found one */
+ assert (!need_dummy_vbo || dummy_vbo_stream != -1);
+
+ /* calculate vtxbuf_holes_map to match the slots bound by update_vertex_buffers_sw() */
+ j = 0;
+ for (i = 0; i < device->caps.MaxStreams; i++) {
+ if (state->stream[i] || (need_dummy_vbo && dummy_vbo_stream == i)) {
+ vtxbuf_holes_map[i] = j;
+ j++;
+ }
+ }
for (n = 0; n < vs->num_inputs; ++n) {
index = vdecl_index_map[n];
if (index >= 0) {
ve.velems[n] = vdecl->elems[index];
+ ve.velems[n].vertex_buffer_index =
+ vtxbuf_holes_map[ve.velems[n].vertex_buffer_index];
b = ve.velems[n].vertex_buffer_index;
/* XXX wine just uses 1 here: */
if (state->stream_freq[b] & D3DSTREAMSOURCE_INSTANCEDATA)
* vertex shader needs, we bind a dummy vbo with 0 0 0 0.
* This is not precised by the spec, but is the behaviour
* tested on win */
- ve.velems[n].vertex_buffer_index = dummy_vbo_stream;
+ ve.velems[n].vertex_buffer_index = vtxbuf_holes_map[dummy_vbo_stream];
ve.velems[n].src_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
ve.velems[n].src_offset = 0;
ve.velems[n].instance_divisor = 0;
ve.count = vs->num_inputs;
cso_set_vertex_elements(device->cso_sw, &ve);
+ return dummy_vbo_stream;
}
static void
-update_vertex_buffers_sw(struct NineDevice9 *device, int start_vertice, int num_vertices)
+update_vertex_buffers_sw(struct NineDevice9 *device, int dummy_vbo_stream,
+ int start_vertice, int num_vertices)
{
struct pipe_context *pipe = nine_context_get_pipe_acquire(device);
struct pipe_context *pipe_sw = device->pipe_sw;
struct nine_state *state = &device->state;
struct nine_state_sw_internal *sw_internal = &device->state_sw_internal;
- struct pipe_vertex_buffer vtxbuf;
- uint32_t mask = 0xf;
- unsigned i;
-
- DBG("mask=%x\n", mask);
-
- /* TODO: handle dummy_vbo_bound_at */
-
- for (i = 0; mask; mask >>= 1, ++i) {
- if (mask & 1) {
- if (state->stream[i]) {
- unsigned offset;
- struct pipe_resource *buf;
- struct pipe_box box;
- void *userbuf;
-
- vtxbuf = state->vtxbuf[i];
- buf = NineVertexBuffer9_GetResource(state->stream[i], &offset);
-
- DBG("Locking %p (offset %d, length %d)\n", buf,
- vtxbuf.buffer_offset, num_vertices * vtxbuf.stride);
-
- u_box_1d(vtxbuf.buffer_offset + offset + start_vertice * vtxbuf.stride,
- num_vertices * vtxbuf.stride, &box);
-
- userbuf = pipe->buffer_map(pipe, buf, 0, PIPE_MAP_READ, &box,
- &(sw_internal->transfers_so[i]));
- vtxbuf.is_user_buffer = true;
- vtxbuf.buffer.user = userbuf;
-
- if (!device->driver_caps.user_sw_vbufs) {
- vtxbuf.buffer.resource = NULL;
- vtxbuf.is_user_buffer = false;
- u_upload_data(device->pipe_sw->stream_uploader,
- 0,
- box.width,
- 16,
- userbuf,
- &(vtxbuf.buffer_offset),
- &(vtxbuf.buffer.resource));
- u_upload_unmap(device->pipe_sw->stream_uploader);
- }
- pipe_sw->set_vertex_buffers(pipe_sw, i, 1, 0, false, &vtxbuf);
- pipe_vertex_buffer_unreference(&vtxbuf);
- } else
- pipe_sw->set_vertex_buffers(pipe_sw, i, 0, 1, false, NULL);
+ struct pipe_vertex_buffer vbuffer[PIPE_MAX_ATTRIBS];
+ unsigned vtxbuf_count = 0;
+ unsigned i, j = 0;
+
+ for (i = 0; i < device->caps.MaxStreams; i++) {
+ if (dummy_vbo_stream == i) {
+ vbuffer[j].buffer.resource = NULL;
+ pipe_resource_reference(&vbuffer[j].buffer.resource, device->dummy_vbo_sw);
+ vbuffer[j].stride = 0;
+ vbuffer[j].is_user_buffer = false;
+ vbuffer[j].buffer_offset = 0;
+ j++;
+ } else if (state->stream[i]) {
+ unsigned offset;
+ struct pipe_resource *buf;
+ struct pipe_box box;
+ void *userbuf;
+
+ vbuffer[j] = state->vtxbuf[i];
+ buf = NineVertexBuffer9_GetResource(state->stream[i], &offset);
+
+ DBG("Locking %p (offset %d, length %d)\n", buf,
+ vbuffer[j].buffer_offset, num_vertices * vbuffer[j].stride);
+
+ u_box_1d(vbuffer[j].buffer_offset + offset + start_vertice *
+ vbuffer[j].stride, num_vertices * vbuffer[j].stride, &box);
+
+ userbuf = pipe->buffer_map(pipe, buf, 0, PIPE_MAP_READ, &box,
+ &(sw_internal->transfers_so[i]));
+ vbuffer[j].is_user_buffer = true;
+ vbuffer[j].buffer.user = userbuf;
+
+ if (!device->driver_caps.user_sw_vbufs) {
+ vbuffer[j].buffer.resource = NULL;
+ vbuffer[j].is_user_buffer = false;
+ u_upload_data(device->pipe_sw->stream_uploader,
+ 0,
+ box.width,
+ 16,
+ userbuf,
+ &(vbuffer[j].buffer_offset),
+ &(vbuffer[j].buffer.resource));
+ u_upload_unmap(device->pipe_sw->stream_uploader);
+ }
+ j++;
}
}
+
+ vtxbuf_count = j;
+ pipe_sw->set_vertex_buffers(pipe_sw, vtxbuf_count, device->caps.MaxStreams-vtxbuf_count, true, vbuffer);
+
nine_context_get_pipe_release(device);
}
struct nine_state *state = &device->state;
bool programmable_vs = state->vs && !(state->vdecl && state->vdecl->position_t);
struct NineVertexShader9 *vs = programmable_vs ? device->state.vs : device->ff.vs;
+ int dummy_vbo_stream;
assert(programmable_vs);
DBG("Preparing draw\n");
cso_set_vertex_shader_handle(device->cso_sw,
NineVertexShader9_GetVariantProcessVertices(vs, vdecl_out, so));
- update_vertex_elements_sw(device);
- update_vertex_buffers_sw(device, start_vertice, num_vertices);
+ dummy_vbo_stream = update_vertex_elements_sw(device);
+ update_vertex_buffers_sw(device, dummy_vbo_stream, start_vertice, num_vertices);
update_vs_constants_sw(device);
DBG("Preparation succeeded\n");
}
struct pipe_context *pipe_sw = device->pipe_sw;
int i;
+ pipe_sw->set_vertex_buffers(pipe_sw, 0, device->caps.MaxStreams, false, NULL);
for (i = 0; i < 4; i++) {
- pipe_sw->set_vertex_buffers(pipe_sw, i, 0, 1, false, NULL);
if (sw_internal->transfers_so[i])
pipe->buffer_unmap(pipe, sw_internal->transfers_so[i]);
sw_internal->transfers_so[i] = NULL;
struct NineIndexBuffer9 *idxbuf;
struct NineVertexBuffer9 *stream[PIPE_MAX_ATTRIBS];
+ uint32_t stream_mask; /* bit i set when stream[i] is not NULL */
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS]; /* vtxbuf.buffer unused */
+ unsigned last_vtxbuf_count;
UINT stream_freq[PIPE_MAX_ATTRIBS];
struct pipe_clip_state clip;
struct NineVertexDeclaration9 *vdecl;
struct pipe_vertex_buffer vtxbuf[PIPE_MAX_ATTRIBS];
+ uint32_t vtxbuf_mask; /* bit i set when context->vtxbuf[i].buffer.resource is not NULL */
+ uint32_t last_vtxbuf_count;
UINT stream_freq[PIPE_MAX_ATTRIBS];
uint32_t stream_instancedata_mask; /* derived from stream_freq */
uint32_t stream_usage_mask; /* derived from VS and vdecl */
uint16_t enabled_samplers_mask_ps;
int dummy_vbo_bound_at; /* -1 = not bound , >= 0 = bound index */
- bool vbo_bound_done;
bool inline_constants;
/**
* Bind an array of vertex buffers to the specified slots.
*
- * \param start_slot first vertex buffer slot
* \param count number of consecutive vertex buffers to bind.
* \param unbind_num_trailing_slots unbind slots after the bound slots
* \param take_ownership the caller holds buffer references and they
* \param buffers array of the buffers to bind
*/
void (*set_vertex_buffers)(struct pipe_context *,
- unsigned start_slot,
unsigned num_buffers,
unsigned unbind_num_trailing_slots,
bool take_ownership,
cso_set_vertex_elements(p->cso, &p->velem);
util_draw_vertex_buffer(p->pipe, p->cso,
- p->vbuf, 0, 0,
+ p->vbuf, 0,
MESA_PRIM_QUADS,
4, /* verts */
2); /* attribs/vert */
cso_set_vertex_elements(p->cso, &p->velem);
util_draw_vertex_buffer(p->pipe, p->cso,
- p->vbuf, 0, 0,
+ p->vbuf, 0,
MESA_PRIM_TRIANGLES,
3, /* verts */
2); /* attribs/vert */
# XXX: deprecated
self._state.vs.sampler_views = views
- def set_vertex_buffers(self, start_slot, num_buffers, unbind_num_trailing_slots, take_ownership, buffers):
- self._update(self._state.vertex_buffers, start_slot, num_buffers, buffers)
+ def set_vertex_buffers(self, num_buffers, unbind_num_trailing_slots, take_ownership, buffers):
+ self._update(self._state.vertex_buffers, 0, num_buffers, buffers)
def create_vertex_elements_state(self, num_elements, elements):
return elements[0:num_elements]
st->uses_user_vertex_buffers = uses_user_vertex_buffers;
} else {
/* Only vertex buffers. */
- cso_set_vertex_buffers(cso, 0, num_vbuffers, unbind_trailing_vbuffers,
+ cso_set_vertex_buffers(cso, num_vbuffers, unbind_trailing_vbuffers,
true, vbuffer);
/* This can change only when we update vertex elements. */
assert(st->uses_user_vertex_buffers == uses_user_vertex_buffers);
cso_set_viewport(cso, &vp);
}
- util_draw_vertex_buffer(pipe, cso, vbuffer, 0,
+ util_draw_vertex_buffer(pipe, cso, vbuffer,
offset, /* offset */
MESA_PRIM_TRIANGLE_FAN,
4, /* verts */
u_upload_unmap(st->pipe->stream_uploader);
- cso_set_vertex_buffers(st->cso_context, 0, 1, 0, false, &vb);
+ cso_set_vertex_buffers(st->cso_context, 1, 0, false, &vb);
st->last_num_vbuffers = MAX2(st->last_num_vbuffers, 1);
if (num_instances > 1) {
}
}
- draw_set_vertex_buffers(draw, 0, num_vbuffers, 0, vbuffers);
+ draw_set_vertex_buffers(draw, num_vbuffers, 0, vbuffers);
draw_set_vertex_elements(draw, vp->num_inputs, velements.velems);
if (info->index_size) {
if (!vbuffers[buf].is_user_buffer)
pipe_resource_reference(&vbuffers[buf].buffer.resource, NULL);
}
- draw_set_vertex_buffers(draw, 0, 0, num_vbuffers, NULL);
+ draw_set_vertex_buffers(draw, 0, num_vbuffers, NULL);
draw_bind_vertex_shader(draw, NULL);
}
cso_set_vertex_elements(cso, &velem);
- cso_set_vertex_buffers(cso, 0, 1, 0, false, &vbo);
+ cso_set_vertex_buffers(cso, 1, 0, false, &vbo);
st->last_num_vbuffers = MAX2(st->last_num_vbuffers, 1);
pipe_resource_reference(&vbo.buffer.resource, NULL);