/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"
struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};
static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}
static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;
		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space, if it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort;
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush */
	return false;
}
/* Reference @bo on the current kernel record, accounting its placement
 * against the VRAM/GART limits.  Returns the buffer entry on success, or
 * NULL when the pushbuf has to be flushed before the reference can be made.
 */
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if buffer is referenced on another pushbuf that is owned by the
	 * same client, we need to flush the other pushbuf first to ensure
	 * the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;
			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}
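/* Illustrative sketch (not built): how a caller typically ends up in
 * pushbuf_kref() above.  Referencing a bo through the public
 * nouveau_pushbuf_refn() entry point records it on the current kernel
 * record, with VRAM and/or GART placement picked from the NOUVEAU_BO_*
 * flags.  The example function name and flag choice are assumptions for
 * illustration only; a real caller would also expect the pushbuf to be
 * flushed behind its back when the placement doesn't fit.
 */
#if 0
static int
example_ref_texture(struct nouveau_pushbuf *push, struct nouveau_bo *tex)
{
	struct nouveau_pushbuf_refn ref = {
		.bo    = tex,
		.flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD,
	};

	/* may flush the pushbuf internally before the reference succeeds */
	return nouveau_pushbuf_refn(push, &ref, 1);
}
#endif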
/* Queue a relocation for the dword about to be written at push->cur and
 * return the value to write there, based on the buffer's presumed placement.
 */
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel = &krec->reloc[krec->nr_reloc++];

	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}
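/* Illustrative sketch (not built): how pushbuf_krel() is reached from the
 * public API.  nouveau_pushbuf_reloc() writes the presumed value into the
 * command stream and queues a relocation so the kernel can patch it if the
 * buffer moves.  A 64-bit buffer address is emitted as two dwords using the
 * NOUVEAU_BO_HIGH and NOUVEAU_BO_LOW flags.  The example assumes the bo is
 * already referenced on the pushbuf and that space for the two dwords and
 * relocations was reserved beforehand; the function name is made up.
 */
#if 0
static void
example_emit_bo_address(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
			uint32_t offset)
{
	uint32_t flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_GART | NOUVEAU_BO_RD;

	/* upper 32 bits of (bo->offset + offset) */
	nouveau_pushbuf_reloc(push, bo, offset, flags | NOUVEAU_BO_HIGH, 0, 0);
	/* lower 32 bits */
	nouveau_pushbuf_reloc(push, bo, offset, flags | NOUVEAU_BO_LOW, 0, 0);
}
#endif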
/* Debug dump of one kernel record: buffers, relocations and push segments */
static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
		    kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + (kpsh->length / 4);

		err("ch%d: psh %08x %010llx %010llx\n", chid, kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}
/* Hand every accumulated kernel record to the kernel via the
 * DRM_NOUVEAU_GEM_PUSHBUF ioctl and update the presumed placement of each
 * referenced buffer from the kernel's reply.
 */
static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

		ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available * 80) / 100;
		dev->gart_limit = (req.gart_available * 80) / 100;

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		krec->next = malloc(sizeof(*krec));
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}

static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}

static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
				     bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}
int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_device *dev = client->device;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req;
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* nop pushbuf call, to get the current "return to main" sequence
	 * we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(dev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type = NOUVEAU_BO_VRAM;
	}
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type = NOUVEAU_BO_GART;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}
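/* Illustrative sketch (not built): typical construction.  "client" and
 * "chan" would come from nouveau_client_new() and the FIFO channel the
 * caller created beforehand; four 32KiB buffers and immediate submission
 * are just example values, and the wrapper's name is made up.
 */
#if 0
static struct nouveau_pushbuf *
example_create_pushbuf(struct nouveau_client *client,
		       struct nouveau_object *chan)
{
	struct nouveau_pushbuf *push = NULL;

	if (nouveau_pushbuf_new(client, chan, 4, 32 * 1024, true, &push))
		return NULL;

	return push;
}
#endif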
void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}
struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}
int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && ( push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}
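/* Illustrative sketch (not built): callers reserve space before writing to
 * push->cur directly.  The dword count covers everything emitted until the
 * next nouveau_pushbuf_space()/nouveau_pushbuf_data() call; the reloc and
 * push counts only matter when relocations or extra push segments will be
 * queued.  The function name and zero payload are placeholders.
 */
#if 0
static int
example_emit_dwords(struct nouveau_pushbuf *push, int count)
{
	int ret = nouveau_pushbuf_space(push, count, 0, 0);
	int i;

	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		*push->cur++ = 0x00000000;	/* example payload */
	return 0;
}
#endif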
/* Queue [offset, offset+length) of @bo as a push segment; with a NULL bo,
 * just close off data written to the internal pushbuf since the last call.
 */
void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset = offset;
		kpsh->length = length;
	}
}

int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}

void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur++ = pushbuf_krel(push, bo, data, flags, vor, tor);
}

int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}

uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}

int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);

	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}
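/* Illustrative sketch (not built): finishing a batch.  For a pushbuf created
 * with immediate=true, flushes already happen as space runs out and
 * nouveau_pushbuf_kick() submits whatever is still pending on its own
 * channel; with immediate=false nothing reaches the kernel until kick names
 * the channel explicitly.  The helper's name is made up; validating first
 * pulls any bufctx resources onto the kernel record before submission.
 */
#if 0
static int
example_finish_batch(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	int ret = nouveau_pushbuf_validate(push);
	if (ret)
		return ret;

	return nouveau_pushbuf_kick(push, chan);
}
#endif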