/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"
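
/* A single kernel submission record: the buffer, relocation and push
 * tables that will be passed to the DRM_NOUVEAU_GEM_PUSHBUF ioctl, along
 * with running totals of the VRAM/GART space consumed by the referenced
 * buffers.
 */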
struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};
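
/* Private pushbuf state wrapping the public struct nouveau_pushbuf.
 * Tracks the list of submission records, the command buffer currently
 * being filled, and the pool of command buffers owned by this pushbuf.
 */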
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};

static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}

static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);
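
/* Check whether "bo" can be accounted within the VRAM/GART limits of the
 * current submission record, demoting VRAM|GART buffers to VRAM-only where
 * that frees up enough GART space.  Returns false if a flush is required.
 */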
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;
		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space, if it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer.
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort,
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one.
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush */
	return false;
}
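
/* Add (or update) the kernel buffer reference for "bo" on the current
 * submission record.  Returns NULL when the buffer table is full or the
 * buffer cannot be placed, in which case the caller must flush and retry.
 */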
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if buffer is referenced on another pushbuf that is owned by the
	 * same client, we need to flush the other pushbuf first to ensure
	 * the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;

			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}
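
/* Record a relocation against "bo" at the current write position and
 * return the value to emit there, computed from the buffer's presumed
 * offset and domain so the kernel only patches it if the buffer moves.
 */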
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel = &krec->reloc[krec->nr_reloc++];

	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}
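
/* Debug helper: dump a submission record (buffer references, relocations
 * and push segments, plus the raw command words) through err().
 */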
static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
		    kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + (kpsh->length / 4);

		err("ch%d: psh %08x %010llx %010llx\n", chid, kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}
static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;
		req.vram_available = 0; /* for valgrind */
		req.gart_available = 0;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

#ifndef SIMULATE
		ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available * 80) / 100;
		dev->gart_limit = (req.gart_available * 80) / 100;
#else
		if (dbg_on(31))
			ret = -EINVAL;
#endif

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}
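
/* Flush the current submission record: submit immediately if the pushbuf
 * owns a channel, otherwise queue the record and start a new one.  Either
 * way the per-client buffer associations and record counters are reset.
 */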
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		krec->next = malloc(sizeof(*krec));
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	krec = nvpb->krec;
	krec->vram_used = 0;
	krec->gart_used = 0;
	krec->nr_buffer = 0;
	krec->nr_reloc = 0;
	krec->nr_push = 0;

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}
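
/* Roll back a partially-completed set of buffer references: drop every
 * reference added after the saved buffer/reloc counts so the caller can
 * flush and retry cleanly.
 */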
static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}
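
/* Reference an array of buffers on the pushbuf.  On failure the partial
 * state is rolled back and, when "retry" is set, the pushbuf is flushed
 * and the whole set is attempted once more against an empty record.
 */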
static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}
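
/* Validate the pushbuf's attached bufctx: reserve space for the resulting
 * relocations, reference every pending bufref, emit its relocations, and
 * move the pending list onto the bufctx's current list.
 */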
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
				     bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}
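
/* Create a pushbuf for "chan".  A nop GEM_PUSHBUF call fetches the
 * channel's "return to main" suffix words, then "nr" command buffers of
 * "size" bytes are allocated in the domain the channel expects.
 */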
int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_device *dev = client->device;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req = {};
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* nop pushbuf call, to get the current "return to main" sequence
	 * we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type = NOUVEAU_BO_VRAM;
	} else
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type = NOUVEAU_BO_GART;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}
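
/* Destroy a pushbuf: drop every buffer reference still held by queued
 * submission records, free the records and the command buffer pool, and
 * clear the caller's pointer.
 */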
void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}
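
/* Swap the bufctx attached to the pushbuf, returning the previous one so
 * the caller can restore it later.
 */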
struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}
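
/* Make sure the pushbuf can accept "dwords" more command words plus the
 * given numbers of relocations and push segments, switching to the next
 * command buffer and/or flushing the current record as required.
 */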
int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && (push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}
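
/* Queue a push segment for "bo".  Whenever the target changes (including
 * the NULL flushes used internally), the commands written so far into the
 * internal buffer are closed off first, with the channel's suffix words
 * appended, and recorded as their own segment.
 */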
void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset = offset;
		kpsh->length = length;
	}
}

int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}

void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur++ = pushbuf_krel(push, bo, data, flags, vor, tor);
}

int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}
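
/* Report which access flags (NOUVEAU_BO_RD/WR) "bo" is currently
 * referenced with on this pushbuf, or 0 if it is not referenced here.
 */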
uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}
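
/* Explicitly kick the pushbuf off to the kernel on "chan".  Immediate
 * pushbufs are flushed and revalidated; deferred ones submit all of their
 * queued records in one go.
 */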
int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);

	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}