struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
return -ENOMEM;
if (new_bo != old_bo) {
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(new_bo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
goto fail_free;
}
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
- TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
&nv_crtc->cursor.nvbo);
if (!ret) {
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
+ NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
if (!fb || !fb->obj[0])
continue;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
if (!nv_crtc->cursor.nvbo)
continue;
- ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
+ NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret && nv_crtc->cursor.set_offset)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) {
- ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret) {
ret = nouveau_bo_map(disp->sync);
if (ret)
return 0;
nvbo = nouveau_gem_object(fb->obj[0]);
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
return ret;
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
- ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
+ ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
+ false);
if (ret)
goto done;
}
static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
- int *align, u64 *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvif_device *device = &drm->client.device;
}
struct nouveau_bo *
-nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
u32 tile_mode, u32 tile_flags)
{
struct nouveau_drm *drm = cli->drm;
* mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
* into in nouveau_gem_new().
*/
- if (flags & TTM_PL_FLAG_UNCACHED) {
+ if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
/* Determine if we can get a cache-coherent map, forcing
* uncached mapping if we can't.
*/
* Skip page sizes that can't support needed domains.
*/
if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
- (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+ (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
continue;
- if ((flags & TTM_PL_FLAG_TT) &&
+ if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
continue;
}
nvbo->page = vmm->page[pi].shift;
- nouveau_bo_fixup_align(nvbo, flags, align, size);
+ nouveau_bo_fixup_align(nvbo, align, size);
return nvbo;
}
int
-nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj)
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
- nouveau_bo_placement_set(nvbo, flags, 0);
+ nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
int
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
- uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+ uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
struct sg_table *sg, struct dma_resv *robj,
struct nouveau_bo **pnvbo)
{
struct nouveau_bo *nvbo;
int ret;
- nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
- ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
if (ret)
return ret;
}
static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain,
+ uint32_t flags)
{
*n = 0;
- if (type & TTM_PL_FLAG_VRAM)
+ if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
- if (type & TTM_PL_FLAG_TT)
+ if (domain & NOUVEAU_GEM_DOMAIN_GART)
pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
- if (type & TTM_PL_FLAG_SYSTEM)
+ if (domain & NOUVEAU_GEM_DOMAIN_CPU)
pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}
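/*
 * Illustrative sketch, not part of the patch: with set_placement_list()
 * keyed on GEM domains, a caller inside nouveau_bo.c builds TTM placements
 * from domain bits only, e.g.
 *
 *	struct ttm_place pl[3];
 *	unsigned int n;
 *
 *	set_placement_list(pl, &n,
 *			   NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART,
 *			   0);
 *	// n == 2: pl[0].flags has TTM_PL_FLAG_VRAM, pl[1].flags TTM_PL_FLAG_TT
 */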
static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
- nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
+ nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
* Make sure that the color and depth buffers are handled
}
void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+ uint32_t busy)
{
struct ttm_placement *pl = &nvbo->placement;
uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
pl->placement = nvbo->placements;
set_placement_list(nvbo->placements, &pl->num_placement,
- type, flags);
+ domain, flags);
pl->busy_placement = nvbo->busy_placements;
set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
- type | busy, flags);
+ domain | busy, flags);
- set_placement_range(nvbo, type);
+ set_placement_range(nvbo, domain);
}
int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
return ret;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
- memtype == TTM_PL_FLAG_VRAM && contig) {
+ domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
if (!nvbo->contig) {
nvbo->contig = true;
force = true;
}
if (nvbo->pin_refcnt) {
- if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+ bool error = evict;
+
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
+ break;
+ case TTM_PL_TT:
+ error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+ break;
+ default:
+ break;
+ }
+
+ if (error) {
NV_ERROR(drm, "bo %p pinned elsewhere: "
"0x%08x vs 0x%08x\n", bo,
- 1 << bo->mem.mem_type, memtype);
+ bo->mem.mem_type, domain);
ret = -EBUSY;
}
nvbo->pin_refcnt++;
}
if (evict) {
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
goto out;
}
nvbo->pin_refcnt++;
- nouveau_bo_placement_set(nvbo, memtype, 0);
+ nouveau_bo_placement_set(nvbo, domain, 0);
/* drop pin_refcnt temporarily, so we don't trip the assertion
* in nouveau_bo_move() that makes sure we're not trying to
if (ref)
goto out;
- nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+ switch (bo->mem.mem_type) {
+ case TTM_PL_VRAM:
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+ break;
+ case TTM_PL_TT:
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+ break;
+ default:
+ break;
+ }
ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
- TTM_PL_FLAG_SYSTEM);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+ NOUVEAU_GEM_DOMAIN_CPU);
break;
default:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
break;
}
return 0;
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+ 0);
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
nvbo->busy_placements[i].lpfn = mappable;
}
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
return nouveau_bo_validate(nvbo, false, false);
}
void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
- u32 flags, u32 tile_mode, u32 tile_flags);
-int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
+ u32 domain, u32 tile_mode, u32 tile_flags);
+int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj);
-int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
+int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct dma_resv *robj,
struct nouveau_bo **);
}
static inline int
-nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain,
struct nouveau_bo **pnvbo)
{
- int ret = nouveau_bo_new(cli, size, align, flags,
+ int ret = nouveau_bo_new(cli, size, align, domain,
0, 0, NULL, NULL, pnvbo);
if (ret == 0) {
- ret = nouveau_bo_pin(*pnvbo, flags, true);
+ ret = nouveau_bo_pin(*pnvbo, domain, true);
if (ret == 0) {
ret = nouveau_bo_map(*pnvbo);
if (ret == 0)
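/*
 * Usage sketch for the converted helper above, assuming a caller that holds
 * a struct nouveau_cli *cli: an allocation that previously passed
 * TTM_PL_FLAG_VRAM now passes the GEM domain directly, e.g.
 *
 *	struct nouveau_bo *bo = NULL;
 *	int ret = nouveau_bo_new_pin_map(cli, PAGE_SIZE, 0,
 *					 NOUVEAU_GEM_DOMAIN_VRAM, &bo);
 *	if (ret == 0) {
 *		... access the mapping via nouveau_bo_rd32()/nouveau_bo_wr32() ...
 *		nouveau_bo_unmap(bo);
 *		nouveau_bo_unpin(bo);
 *		nouveau_bo_ref(NULL, &bo);
 *	}
 */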
atomic_set(&chan->killed, 0);
/* allocate memory for dma push buffer */
- target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+ target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
if (nouveau_vram_pushbuf)
- target = TTM_PL_FLAG_VRAM;
+ target = NOUVEAU_GEM_DOMAIN_VRAM;
ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
&chan->push.buffer);
chunk->pagemap.owner = drm->dev;
ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
- TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
+ NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
&chunk->bo);
if (ret)
goto out_release;
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
goto out_bo_free;
mutex_lock(&drm->dmem->mutex);
list_for_each_entry(chunk, &drm->dmem->chunks, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
}
if (ret)
goto out_unref;
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret) {
NV_ERROR(drm, "failed to pin fb: %d\n", ret);
goto out_unref;
{
struct nouveau_drm *drm = cli->drm;
struct nouveau_bo *nvbo;
- u32 flags = 0;
int ret;
- if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
- flags |= TTM_PL_FLAG_VRAM;
- if (domain & NOUVEAU_GEM_DOMAIN_GART)
- flags |= TTM_PL_FLAG_TT;
- if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
- flags |= TTM_PL_FLAG_SYSTEM;
+ if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
+ domain |= NOUVEAU_GEM_DOMAIN_CPU;
- if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
- flags |= TTM_PL_FLAG_UNCACHED;
-
- nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+ nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
tile_flags);
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
return ret;
}
- ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
+ ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
return ret;
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains & nvbo->valid_domains &
(write_domains ? write_domains : read_domains);
- uint32_t pref_flags = 0, valid_flags = 0;
+ uint32_t pref_domains = 0;
if (!domains)
return -EINVAL;
- if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
- valid_flags |= TTM_PL_FLAG_VRAM;
-
- if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
- valid_flags |= TTM_PL_FLAG_TT;
+ valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
bo->mem.mem_type == TTM_PL_VRAM)
- pref_flags |= TTM_PL_FLAG_VRAM;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
bo->mem.mem_type == TTM_PL_TT)
- pref_flags |= TTM_PL_FLAG_TT;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
- pref_flags |= TTM_PL_FLAG_VRAM;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
else
- pref_flags |= TTM_PL_FLAG_TT;
+ pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
- nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
+ nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);
return 0;
}
struct nouveau_bo *nvbo;
struct dma_resv *robj = attach->dmabuf->resv;
u64 size = attach->dmabuf->size;
- u32 flags = 0;
int align = 0;
int ret;
- flags = TTM_PL_FLAG_TT;
-
dma_resv_lock(robj, NULL);
- nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
+ nvbo = nouveau_bo_alloc(&drm->client, &size, &align,
+ NOUVEAU_GEM_DOMAIN_GART, 0, 0);
if (IS_ERR(nvbo)) {
obj = ERR_CAST(nvbo);
goto unlock;
goto unlock;
}
- ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+ ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART,
+ sg, robj);
if (ret) {
nouveau_bo_ref(NULL, &nvbo);
obj = ERR_PTR(ret);
int ret;
/* pin buffer into GTT */
- ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
+ ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_GART, false);
if (ret)
return -EINVAL;
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
priv->base.context_del = nv10_fence_context_del;
spin_lock_init(&priv->lock);
- ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
+ NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &priv->bo);
if (!ret) {
- ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(priv->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(priv->bo);
if (ret)
mutex_init(&priv->mutex);
/* Use VRAM if there is any ; otherwise fallback to system memory */
- domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
- /*
- * fences created in sysmem must be non-cached or we
- * will lose CPU/GPU coherency!
- */
- TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+ domain = drm->client.device.info.ram_size != 0 ?
+ NOUVEAU_GEM_DOMAIN_VRAM :
+ /*
+ * fences created in sysmem must be non-cached or we
+ * will lose CPU/GPU coherency!
+ */
+ NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
ret = nouveau_bo_new(&drm->client, 16 * drm->chan.nr, 0,
domain, 0, 0, NULL, NULL, &priv->bo);
if (ret == 0) {