{
struct nv50_context *nv50 = nv50_context(pipe);
struct nouveau_pushbuf *push = nv50->base.pushbuf;
+ bool tex_dirty = false;
int i, s;
/* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
push->kick_notify = nv50_draw_vbo_kick_notify;
+ /* TODO: Instead of iterating over all the buffer resources looking for
+ * coherent buffers, keep track of a context-wide count.
+ */
for (s = 0; s < 3 && !nv50->cb_dirty; ++s) {
uint32_t valid = nv50->constbuf_valid[s];
/* ... walks the valid constbufs and sets nv50->cb_dirty if one is coherent ... */
}
if (nv50->cb_dirty) {
/* ... constbuf cache flush elided ... */
nv50->cb_dirty = false;
}
+ for (s = 0; s < 3 && !tex_dirty; ++s) {
+ for (i = 0; i < nv50->num_textures[s] && !tex_dirty; ++i) {
+ if (!nv50->textures[s][i] ||
+ nv50->textures[s][i]->texture->target != PIPE_BUFFER)
+ continue;
+ if (nv50->textures[s][i]->texture->flags &
+ PIPE_RESOURCE_FLAG_MAP_COHERENT)
+ tex_dirty = true;
+ }
+ }
+ if (tex_dirty) {
+ BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1);
+ PUSH_DATA (push, 0x20);
+ }
+
if (nv50->vbo_fifo) {
nv50_push_vbo(nv50, info);
push->kick_notify = nv50_default_kick_notify;
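
The TODO above asks for a context-wide count instead of a per-draw scan. A rough sketch of what that could look like on the nv50 side follows; the coherent_bufs field and the nv50_track_coherent() helper are hypothetical names invented here for illustration, not part of this patch or of the driver, and the surrounding nv50 headers are assumed.

/* Hypothetical sketch, not in the patch: keep a context-wide count of
 * bound coherent buffers so draw_vbo does not have to rescan constbufs
 * and textures on every draw.  "coherent_bufs" would be a new field in
 * struct nv50_context, updated wherever resources are (un)bound. */
static inline void
nv50_track_coherent(struct nv50_context *nv50,
                    struct pipe_resource *old_res,
                    struct pipe_resource *new_res)
{
   if (old_res && (old_res->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT))
      nv50->coherent_bufs--;
   if (new_res && (new_res->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT))
      nv50->coherent_bufs++;
}

/* nv50_draw_vbo() could then replace both scans with a cheap check,
 * conservatively marking both caches dirty: */
if (nv50->coherent_bufs) {
   nv50->cb_dirty = true;
   tex_dirty = true;
}

The next hunk is the nvc0 half of the change, in the nvc0 driver's draw_vbo path; instead of a single whole-cache flush it emits one TEX_CACHE_CTL write per TIC entry that references a coherent buffer.
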
push->kick_notify = nvc0_draw_vbo_kick_notify;
+ /* TODO: Instead of iterating over all the buffer resources looking for
+ * coherent buffers, keep track of a context-wide count.
+ */
for (s = 0; s < 5 && !nvc0->cb_dirty; ++s) {
uint32_t valid = nvc0->constbuf_valid[s];
/* ... walks the valid constbufs and sets nvc0->cb_dirty if one is coherent ... */
}
if (nvc0->cb_dirty) {
/* ... constbuf cache flush elided ... */
nvc0->cb_dirty = false;
}
+ for (s = 0; s < 5; ++s) {
+ for (int i = 0; i < nvc0->num_textures[s]; ++i) {
+ struct nv50_tic_entry *tic = nv50_tic_entry(nvc0->textures[s][i]);
+ struct pipe_resource *res;
+ if (!tic)
+ continue;
+ res = nvc0->textures[s][i]->texture;
+ if (res->target != PIPE_BUFFER ||
+ !(res->flags & PIPE_RESOURCE_FLAG_MAP_COHERENT))
+ continue;
+
+ BEGIN_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 1);
+ PUSH_DATA (push, (tic->id << 4) | 1);
+ NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_cache_flush_count, 1);
+ }
+ }
+
if (nvc0->state.vbo_mode) {
nvc0_push_vbo(nvc0, info);
push->kick_notify = nvc0_default_kick_notify;
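
For reference, these flush paths only trigger for buffer resources created with PIPE_RESOURCE_FLAG_MAP_COHERENT, which is what the GL state tracker requests for coherent ARB_buffer_storage mappings. A minimal sketch of such a resource at the Gallium level follows; the create_coherent_buffer() helper is made up for illustration and assumes a valid pipe_screen.

#include <string.h>
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "pipe/p_state.h"

/* Illustrative helper, not part of the patch: create a buffer whose
 * mapping is coherent -- the kind of resource the scans above look for. */
static struct pipe_resource *
create_coherent_buffer(struct pipe_screen *screen, unsigned size)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.width0 = size;
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;
   templ.bind = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_CONSTANT_BUFFER;
   templ.usage = PIPE_USAGE_STREAM;
   templ.flags = PIPE_RESOURCE_FLAG_MAP_COHERENT;

   return screen->resource_create(screen, &templ);
}

Binding a sampler view of such a buffer (or using it as a constant buffer) is what makes the scans above find a coherent resource and emit the corresponding cache flush before each draw.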