From: Ben Skeggs
Date: Mon, 1 Nov 2010 01:45:02 +0000 (+1000)
Subject: drm/nouveau: rework gpu-specific instmem interfaces
X-Git-Tag: v3.0~2447^2~37^2~30
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=e41115d0ad5c40a7ea4d85b1c77b4c02185a5581;p=platform%2Fkernel%2Flinux-amlogic.git

drm/nouveau: rework gpu-specific instmem interfaces

Reviewed-by: Francisco Jerez
Signed-off-by: Ben Skeggs
---
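The rework replaces the four drm_device-centric hooks (populate/clear/bind/unbind) with gpuobj-centric operations: get()/put() manage an object's VRAM backing, map()/unmap() manage its window in the PRAMIN aperture, and NVOBJ_CINST_GLOBAL marks objects that do not live inside a channel's instance block. The sketch below only illustrates how a global (non-channel) object is expected to travel through the new hooks -- the function name is hypothetical, error handling is abbreviated, and the authoritative flow is the nouveau_gpuobj_new()/nouveau_gpuobj_del() hunks further down.

/* Illustrative sketch, not part of the patch: allocation of a global
 * gpuobj through the reworked instmem hooks.
 */
static int example_global_gpuobj_alloc(struct drm_device *dev,
                                        struct nouveau_gpuobj *gpuobj,
                                        u32 size, u32 align)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int ret;

        /* allocate VRAM backing pages; sets gpuobj->vinst and gpuobj->size */
        ret = instmem->get(gpuobj, size, align);
        if (ret)
                return ret;

        /* map into the PRAMIN aperture once it is available; failure only
         * means the object has no CPU-visible offset yet
         */
        ret = -ENOSYS;
        if (dev_priv->ramin_available)
                ret = instmem->map(gpuobj);     /* sets gpuobj->pinst */
        if (ret)
                gpuobj->pinst = ~0;

        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        return 0;
}

Teardown mirrors this: for NVOBJ_CINST_GLOBAL objects nouveau_gpuobj_del() calls unmap() and then put(), while channel-local objects simply return their drm_mm_node to the channel's ramin heap.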
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 18a611e..822cd40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -146,15 +146,16 @@ enum nouveau_flags {
 #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+
+#define NVOBJ_CINST_GLOBAL 0xdeadbeef
+
 struct nouveau_gpuobj {
         struct drm_device *dev;
         struct kref refcount;
         struct list_head list;
-        struct drm_mm_node *im_pramin;
-        struct nouveau_bo *im_backing;
+        void *node;
         u32 *suspend;
-        int im_bound;
         uint32_t flags;
@@ -288,11 +289,11 @@ struct nouveau_instmem_engine {
         int (*suspend)(struct drm_device *dev);
         void (*resume)(struct drm_device *dev);
-        int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
-                        u32 *size, u32 align);
-        void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
-        int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
-        int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+        int (*get)(struct nouveau_gpuobj *, u32 size, u32 align);
+        void (*put)(struct nouveau_gpuobj *);
+        int (*map)(struct nouveau_gpuobj *);
+        void (*unmap)(struct nouveau_gpuobj *);
+
         void (*flush)(struct drm_device *);
 };
@@ -1182,11 +1183,10 @@ extern int nv04_instmem_init(struct drm_device *);
 extern void nv04_instmem_takedown(struct drm_device *);
 extern int nv04_instmem_suspend(struct drm_device *);
 extern void nv04_instmem_resume(struct drm_device *);
-extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-                                 u32 *size, u32 align);
-extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv04_instmem_put(struct nouveau_gpuobj *);
+extern int nv04_instmem_map(struct nouveau_gpuobj *);
+extern void nv04_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv04_instmem_flush(struct drm_device *);
 
 /* nv50_instmem.c */
@@ -1194,11 +1194,10 @@ extern int nv50_instmem_init(struct drm_device *);
 extern void nv50_instmem_takedown(struct drm_device *);
 extern int nv50_instmem_suspend(struct drm_device *);
 extern void nv50_instmem_resume(struct drm_device *);
-extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-                                 u32 *size, u32 align);
-extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nv50_instmem_put(struct nouveau_gpuobj *);
+extern int nv50_instmem_map(struct nouveau_gpuobj *);
+extern void nv50_instmem_unmap(struct nouveau_gpuobj *);
 extern void nv50_instmem_flush(struct drm_device *);
 extern void nv84_instmem_flush(struct drm_device *);
 extern void nv50_vm_flush(struct drm_device *, int engine);
@@ -1208,11 +1207,10 @@ extern int nvc0_instmem_init(struct drm_device *);
 extern void nvc0_instmem_takedown(struct drm_device *);
 extern int nvc0_instmem_suspend(struct drm_device *);
 extern void nvc0_instmem_resume(struct drm_device *);
-extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
-                                 u32 *size, u32 align);
-extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
-extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nvc0_instmem_get(struct nouveau_gpuobj *, u32 size, u32 align);
+extern void nvc0_instmem_put(struct nouveau_gpuobj *);
+extern int nvc0_instmem_map(struct nouveau_gpuobj *);
+extern void nvc0_instmem_unmap(struct nouveau_gpuobj *);
 extern void nvc0_instmem_flush(struct drm_device *);
 
 /* nv04_mc.c */
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 8c5e35c..e8c74de 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -168,17 +168,14 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                    struct nouveau_gpuobj **gpuobj_ret)
 {
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_engine *engine = &dev_priv->engine;
+        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
         struct nouveau_gpuobj *gpuobj;
         struct drm_mm_node *ramin = NULL;
-        int ret;
+        int ret, i;
 
         NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
                  chan ? chan->id : -1, size, align, flags);
 
-        if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
-                return -EINVAL;
-
         gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
         if (!gpuobj)
                 return -ENOMEM;
@@ -193,88 +190,45 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
         spin_unlock(&dev_priv->ramin_lock);
 
         if (chan) {
-                NV_DEBUG(dev, "channel heap\n");
-
                 ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
                 if (ramin)
                         ramin = drm_mm_get_block(ramin, size, align);
-
                 if (!ramin) {
                         nouveau_gpuobj_ref(NULL, &gpuobj);
                         return -ENOMEM;
                 }
-        } else {
-                NV_DEBUG(dev, "global heap\n");
-
-                /* allocate backing pages, sets vinst */
-                ret = engine->instmem.populate(dev, gpuobj, &size, align);
-                if (ret) {
-                        nouveau_gpuobj_ref(NULL, &gpuobj);
-                        return ret;
-                }
 
-                /* try and get aperture space */
-                do {
-                        if (drm_mm_pre_get(&dev_priv->ramin_heap))
-                                return -ENOMEM;
-
-                        spin_lock(&dev_priv->ramin_lock);
-                        ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
-                                                   align, 0);
-                        if (ramin == NULL) {
-                                spin_unlock(&dev_priv->ramin_lock);
-                                nouveau_gpuobj_ref(NULL, &gpuobj);
-                                return -ENOMEM;
-                        }
-
-                        ramin = drm_mm_get_block_atomic(ramin, size, align);
-                        spin_unlock(&dev_priv->ramin_lock);
-                } while (ramin == NULL);
+                gpuobj->pinst = chan->ramin->pinst;
+                if (gpuobj->pinst != ~0)
+                        gpuobj->pinst += ramin->start;
 
-                /* on nv50 it's ok to fail, we have a fallback path */
-                if (!ramin && dev_priv->card_type < NV_50) {
-                        nouveau_gpuobj_ref(NULL, &gpuobj);
-                        return -ENOMEM;
-                }
-        }
+                if (dev_priv->card_type < NV_50)
+                        gpuobj->cinst = gpuobj->pinst;
+                else
+                        gpuobj->cinst = ramin->start;
 
-        /* if we got a chunk of the aperture, map pages into it */
-        gpuobj->im_pramin = ramin;
-        if (!chan && gpuobj->im_pramin && dev_priv->ramin_available) {
-                ret = engine->instmem.bind(dev, gpuobj);
+                gpuobj->vinst = ramin->start + chan->ramin->vinst;
+                gpuobj->node = ramin;
+        } else {
+                ret = instmem->get(gpuobj, size, align);
                 if (ret) {
                         nouveau_gpuobj_ref(NULL, &gpuobj);
                         return ret;
                 }
-        }
-
-        /* calculate the various different addresses for the object */
-        if (chan) {
-                gpuobj->pinst = chan->ramin->pinst;
-                if (gpuobj->pinst != ~0)
-                        gpuobj->pinst += gpuobj->im_pramin->start;
-
-                if (dev_priv->card_type < NV_50) {
-                        gpuobj->cinst = gpuobj->pinst;
-                } else {
-                        gpuobj->cinst = gpuobj->im_pramin->start;
-                        gpuobj->vinst = gpuobj->im_pramin->start +
-                                        chan->ramin->vinst;
-                }
-        } else {
-                if (gpuobj->im_pramin)
-                        gpuobj->pinst = gpuobj->im_pramin->start;
-                else
+                ret = -ENOSYS;
+                if (dev_priv->ramin_available)
+                        ret = instmem->map(gpuobj);
+                if (ret)
                         gpuobj->pinst = ~0;
-                gpuobj->cinst = 0xdeadbeef;
+
+                gpuobj->cinst = NVOBJ_CINST_GLOBAL;
         }
 
         if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
-                int i;
-
                 for (i = 0; i < gpuobj->size; i += 4)
                         nv_wo32(gpuobj, i, 0);
-                engine->instmem.flush(dev);
+                instmem->flush(dev);
         }
 
@@ -326,26 +280,34 @@ nouveau_gpuobj_del(struct kref *ref)
                 container_of(ref, struct nouveau_gpuobj, refcount);
         struct drm_device *dev = gpuobj->dev;
         struct drm_nouveau_private *dev_priv = dev->dev_private;
-        struct nouveau_engine *engine = &dev_priv->engine;
+        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
         int i;
 
         NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
 
-        if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
+        if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
                 for (i = 0; i < gpuobj->size; i += 4)
                         nv_wo32(gpuobj, i, 0);
-                engine->instmem.flush(dev);
+                instmem->flush(dev);
         }
 
         if (gpuobj->dtor)
                 gpuobj->dtor(dev, gpuobj);
 
-        if (gpuobj->im_backing)
-                engine->instmem.clear(dev, gpuobj);
+        if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
+                if (gpuobj->node) {
+                        instmem->unmap(gpuobj);
+                        instmem->put(gpuobj);
+                }
+        } else {
+                if (gpuobj->node) {
+                        spin_lock(&dev_priv->ramin_lock);
+                        drm_mm_put_block(gpuobj->node);
+                        spin_unlock(&dev_priv->ramin_lock);
+                }
+        }
 
         spin_lock(&dev_priv->ramin_lock);
-        if (gpuobj->im_pramin)
-                drm_mm_put_block(gpuobj->im_pramin);
         list_del(&gpuobj->list);
         spin_unlock(&dev_priv->ramin_lock);
@@ -385,7 +347,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
         kref_init(&gpuobj->refcount);
         gpuobj->size = size;
         gpuobj->pinst = pinst;
-        gpuobj->cinst = 0xdeadbeef;
+        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
         gpuobj->vinst = vinst;
 
         if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
@@ -935,7 +897,7 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
         int i;
 
         list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
-                if (gpuobj->cinst != 0xdeadbeef)
+                if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
                         continue;
 
                 gpuobj->suspend = vmalloc(gpuobj->size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index b26b34c..b42e29d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -53,10 +53,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv04_mc_init;
         engine->mc.takedown = nv04_mc_takedown;
@@ -106,10 +106,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv04_mc_init;
         engine->mc.takedown = nv04_mc_takedown;
@@ -163,10 +163,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv04_mc_init;
         engine->mc.takedown = nv04_mc_takedown;
@@ -220,10 +220,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv04_mc_init;
         engine->mc.takedown = nv04_mc_takedown;
@@ -280,10 +280,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv04_instmem_takedown;
         engine->instmem.suspend = nv04_instmem_suspend;
         engine->instmem.resume = nv04_instmem_resume;
-        engine->instmem.populate = nv04_instmem_populate;
-        engine->instmem.clear = nv04_instmem_clear;
-        engine->instmem.bind = nv04_instmem_bind;
-        engine->instmem.unbind = nv04_instmem_unbind;
+        engine->instmem.get = nv04_instmem_get;
+        engine->instmem.put = nv04_instmem_put;
+        engine->instmem.map = nv04_instmem_map;
+        engine->instmem.unmap = nv04_instmem_unmap;
         engine->instmem.flush = nv04_instmem_flush;
         engine->mc.init = nv40_mc_init;
         engine->mc.takedown = nv40_mc_takedown;
@@ -343,10 +343,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nv50_instmem_takedown;
         engine->instmem.suspend = nv50_instmem_suspend;
         engine->instmem.resume = nv50_instmem_resume;
-        engine->instmem.populate = nv50_instmem_populate;
-        engine->instmem.clear = nv50_instmem_clear;
-        engine->instmem.bind = nv50_instmem_bind;
-        engine->instmem.unbind = nv50_instmem_unbind;
+        engine->instmem.get = nv50_instmem_get;
+        engine->instmem.put = nv50_instmem_put;
+        engine->instmem.map = nv50_instmem_map;
+        engine->instmem.unmap = nv50_instmem_unmap;
         if (dev_priv->chipset == 0x50)
                 engine->instmem.flush = nv50_instmem_flush;
         else
@@ -449,10 +449,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
         engine->instmem.takedown = nvc0_instmem_takedown;
         engine->instmem.suspend = nvc0_instmem_suspend;
         engine->instmem.resume = nvc0_instmem_resume;
-        engine->instmem.populate = nvc0_instmem_populate;
-        engine->instmem.clear = nvc0_instmem_clear;
-        engine->instmem.bind = nvc0_instmem_bind;
-        engine->instmem.unbind = nvc0_instmem_unbind;
+        engine->instmem.get = nvc0_instmem_get;
+        engine->instmem.put = nvc0_instmem_put;
+        engine->instmem.map = nvc0_instmem_map;
+        engine->instmem.unmap = nvc0_instmem_unmap;
         engine->instmem.flush = nvc0_instmem_flush;
         engine->mc.init = nv50_mc_init;
         engine->mc.takedown = nv50_mc_takedown;
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index 554e55d..b8e3edb 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -98,42 +98,66 @@ nv04_instmem_takedown(struct drm_device *dev)
 }
 
 int
-nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-                      u32 *size, u32 align)
+nv04_instmem_suspend(struct drm_device *dev)
 {
         return 0;
 }
 
 void
-nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
-{
-}
-
-int
-nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_resume(struct drm_device *dev)
 {
-        return 0;
 }
 
 int
-nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+        struct drm_mm_node *ramin = NULL;
+
+        do {
+                if (drm_mm_pre_get(&dev_priv->ramin_heap))
+                        return -ENOMEM;
+
+                spin_lock(&dev_priv->ramin_lock);
+                ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+                if (ramin == NULL) {
+                        spin_unlock(&dev_priv->ramin_lock);
+                        return -ENOMEM;
+                }
+
+                ramin = drm_mm_get_block_atomic(ramin, size, align);
+                spin_unlock(&dev_priv->ramin_lock);
+        } while (ramin == NULL);
+
+        gpuobj->node = ramin;
+        gpuobj->vinst = ramin->start;
         return 0;
 }
 
 void
-nv04_instmem_flush(struct drm_device *dev)
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
+        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+        spin_lock(&dev_priv->ramin_lock);
+        drm_mm_put_block(gpuobj->node);
+        gpuobj->node = NULL;
+        spin_unlock(&dev_priv->ramin_lock);
 }
 
 int
-nv04_instmem_suspend(struct drm_device *dev)
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
+        gpuobj->pinst = gpuobj->vinst;
         return 0;
 }
 
 void
-nv04_instmem_resume(struct drm_device *dev)
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
 }
+
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 1640c12..8716095 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -157,10 +157,7 @@ nv50_instmem_init(struct drm_device *dev)
         nv_wo32(priv->pramin_bar, 0x10, 0x00000000);
         nv_wo32(priv->pramin_bar, 0x14, 0x00000000);
 
-        /* map channel into PRAMIN, gpuobj didn't do it for us */
-        ret = nv50_instmem_bind(dev, chan->ramin);
-        if (ret)
-                return ret;
+        nv50_instmem_map(chan->ramin);
 
         /* poke regs... */
         nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
@@ -305,72 +302,91 @@ nv50_instmem_resume(struct drm_device *dev)
         dev_priv->ramin_available = true;
 }
 
+struct nv50_gpuobj_node {
+        struct nouveau_bo *vram;
+        struct drm_mm_node *ramin;
+        u32 align;
+};
+
+
 int
-nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
-                      u32 *size, u32 align)
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
 {
+        struct drm_device *dev = gpuobj->dev;
+        struct nv50_gpuobj_node *node = NULL;
         int ret;
 
-        if (gpuobj->im_backing)
-                return -EINVAL;
-
-        *size = ALIGN(*size, 4096);
-        if (*size == 0)
-                return -EINVAL;
+        node = kzalloc(sizeof(*node), GFP_KERNEL);
+        if (!node)
+                return -ENOMEM;
+        node->align = align;
 
-        ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM,
-                             0, 0x0000, true, false, &gpuobj->im_backing);
+        ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
+                             0, 0x0000, true, false, &node->vram);
         if (ret) {
                 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
                 return ret;
         }
 
-        ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+        ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
         if (ret) {
                 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
-                nouveau_bo_ref(NULL, &gpuobj->im_backing);
+                nouveau_bo_ref(NULL, &node->vram);
                 return ret;
         }
 
-        gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT;
+        gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
+        gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
+        gpuobj->node = node;
         return 0;
 }
 
 void
-nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nv50_gpuobj_node *node;
 
-        if (gpuobj && gpuobj->im_backing) {
-                if (gpuobj->im_bound)
-                        dev_priv->engine.instmem.unbind(dev, gpuobj);
-                nouveau_bo_unpin(gpuobj->im_backing);
-                nouveau_bo_ref(NULL, &gpuobj->im_backing);
-                gpuobj->im_backing = NULL;
-        }
+        node = gpuobj->node;
+        gpuobj->node = NULL;
+
+        nouveau_bo_unpin(node->vram);
+        nouveau_bo_ref(NULL, &node->vram);
+        kfree(node);
 }
 
 int
-nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
         struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-        struct nouveau_gpuobj *pramin_pt = priv->pramin_pt;
-        uint32_t pte, pte_end;
-        uint64_t vram;
-
-        if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-                return -EINVAL;
+        struct nv50_gpuobj_node *node = gpuobj->node;
+        struct drm_device *dev = gpuobj->dev;
+        struct drm_mm_node *ramin = NULL;
+        u32 pte, pte_end;
+        u64 vram;
+
+        do {
+                if (drm_mm_pre_get(&dev_priv->ramin_heap))
+                        return -ENOMEM;
+
+                spin_lock(&dev_priv->ramin_lock);
+                ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
+                                           node->align, 0);
+                if (ramin == NULL) {
+                        spin_unlock(&dev_priv->ramin_lock);
+                        return -ENOMEM;
+                }
 
-        NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-                 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+                ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
+                spin_unlock(&dev_priv->ramin_lock);
+        } while (ramin == NULL);
 
-        pte = (gpuobj->im_pramin->start >> 12) << 1;
-        pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
+        pte = (ramin->start >> 12) << 1;
+        pte_end = ((ramin->size >> 12) << 1) + pte;
 
         vram = gpuobj->vinst;
 
         NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
pte=%d, pte_end=%d\n", - gpuobj->im_pramin->start, pte, pte_end); + ramin->start, pte, pte_end); NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst); vram |= 1; @@ -380,8 +396,8 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) } while (pte < pte_end) { - nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); - nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); + nv_wo32(priv->pramin_pt, (pte * 4) + 0, lower_32_bits(vram)); + nv_wo32(priv->pramin_pt, (pte * 4) + 4, upper_32_bits(vram)); vram += 0x1000; pte += 2; } @@ -389,36 +405,36 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) nv50_vm_flush(dev, 6); - gpuobj->im_bound = 1; + node->ramin = ramin; + gpuobj->pinst = ramin->start; return 0; } -int -nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) +void +nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - uint32_t pte, pte_end; - - if (gpuobj->im_bound == 0) - return -EINVAL; + struct nv50_gpuobj_node *node = gpuobj->node; + u32 pte, pte_end; - /* can happen during late takedown */ - if (unlikely(!dev_priv->ramin_available)) - return 0; + if (!node->ramin || !dev_priv->ramin_available) + return; - pte = (gpuobj->im_pramin->start >> 12) << 1; - pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; + pte = (node->ramin->start >> 12) << 1; + pte_end = ((node->ramin->size >> 12) << 1) + pte; while (pte < pte_end) { nv_wo32(priv->pramin_pt, (pte * 4) + 0, 0x00000000); nv_wo32(priv->pramin_pt, (pte * 4) + 4, 0x00000000); pte += 2; } - dev_priv->engine.instmem.flush(dev); + dev_priv->engine.instmem.flush(gpuobj->dev); - gpuobj->im_bound = 0; - return 0; + spin_lock(&dev_priv->ramin_lock); + drm_mm_put_block(node->ramin); + node->ramin = NULL; + spin_unlock(&dev_priv->ramin_lock); } void diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c index 7b4e71f..3923208 100644 --- a/drivers/gpu/drm/nouveau/nvc0_instmem.c +++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c @@ -26,67 +26,89 @@ #include "nouveau_drv.h" +struct nvc0_gpuobj_node { + struct nouveau_bo *vram; + struct drm_mm_node *ramin; + u32 align; +}; + int -nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, - u32 *size, u32 align) +nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align) { + struct drm_device *dev = gpuobj->dev; + struct nvc0_gpuobj_node *node = NULL; int ret; - *size = ALIGN(*size, 4096); - if (*size == 0) - return -EINVAL; + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + node->align = align; - ret = nouveau_bo_new(dev, NULL, *size, align, TTM_PL_FLAG_VRAM, - 0, 0x0000, true, false, &gpuobj->im_backing); + ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM, + 0, 0x0000, true, false, &node->vram); if (ret) { NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret); return ret; } - ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM); + ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM); if (ret) { NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret); - nouveau_bo_ref(NULL, &gpuobj->im_backing); + nouveau_bo_ref(NULL, &node->vram); return ret; } - gpuobj->vinst = gpuobj->im_backing->bo.mem.start << PAGE_SHIFT; + gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT; + gpuobj->size = node->vram->bo.mem.num_pages << 
+        gpuobj->node = node;
         return 0;
 }
 
 void
-nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_put(struct nouveau_gpuobj *gpuobj)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
+        struct nvc0_gpuobj_node *node;
 
-        if (gpuobj && gpuobj->im_backing) {
-                if (gpuobj->im_bound)
-                        dev_priv->engine.instmem.unbind(dev, gpuobj);
-                nouveau_bo_unpin(gpuobj->im_backing);
-                nouveau_bo_ref(NULL, &gpuobj->im_backing);
-                gpuobj->im_backing = NULL;
-        }
+        node = gpuobj->node;
+        gpuobj->node = NULL;
+
+        nouveau_bo_unpin(node->vram);
+        nouveau_bo_ref(NULL, &node->vram);
+        kfree(node);
 }
 
 int
-nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_map(struct nouveau_gpuobj *gpuobj)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        uint32_t pte, pte_end;
-        uint64_t vram;
-
-        if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
-                return -EINVAL;
-
-        NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
-                 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
-
-        pte = gpuobj->im_pramin->start >> 12;
-        pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+        struct nvc0_gpuobj_node *node = gpuobj->node;
+        struct drm_device *dev = gpuobj->dev;
+        struct drm_mm_node *ramin = NULL;
+        u32 pte, pte_end;
+        u64 vram;
+
+        do {
+                if (drm_mm_pre_get(&dev_priv->ramin_heap))
+                        return -ENOMEM;
+
+                spin_lock(&dev_priv->ramin_lock);
+                ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
+                                           node->align, 0);
+                if (ramin == NULL) {
+                        spin_unlock(&dev_priv->ramin_lock);
+                        return -ENOMEM;
+                }
+
+                ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
+                spin_unlock(&dev_priv->ramin_lock);
+        } while (ramin == NULL);
+
+        pte = (ramin->start >> 12) << 1;
+        pte_end = ((ramin->size >> 12) << 1) + pte;
 
         vram = gpuobj->vinst;
 
         NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
-                 gpuobj->im_pramin->start, pte, pte_end);
+                 ramin->start, pte, pte_end);
 
         NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
 
         while (pte < pte_end) {
@@ -103,30 +125,35 @@ nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
                 nv_wr32(dev, 0x100cbc, 0x80000005);
         }
 
-        gpuobj->im_bound = 1;
+        node->ramin = ramin;
+        gpuobj->pinst = ramin->start;
         return 0;
 }
 
-int
-nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+void
+nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        uint32_t pte, pte_end;
+        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+        struct nvc0_gpuobj_node *node = gpuobj->node;
+        u32 pte, pte_end;
 
-        if (gpuobj->im_bound == 0)
-                return -EINVAL;
+        if (!node->ramin || !dev_priv->ramin_available)
+                return;
+
+        pte = (node->ramin->start >> 12) << 1;
+        pte_end = ((node->ramin->size >> 12) << 1) + pte;
 
-        pte = gpuobj->im_pramin->start >> 12;
-        pte_end = (gpuobj->im_pramin->size >> 12) + pte;
         while (pte < pte_end) {
-                nv_wr32(dev, 0x702000 + (pte * 8), 0);
-                nv_wr32(dev, 0x702004 + (pte * 8), 0);
+                nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
+                nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);
                 pte++;
         }
 
-        dev_priv->engine.instmem.flush(dev);
+        dev_priv->engine.instmem.flush(gpuobj->dev);
 
-        gpuobj->im_bound = 0;
-        return 0;
+        spin_lock(&dev_priv->ramin_lock);
+        drm_mm_put_block(node->ramin);
+        node->ramin = NULL;
+        spin_unlock(&dev_priv->ramin_lock);
 }
 
 void
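A note on the PTE arithmetic in the nv50/nvc0 map()/unmap() hunks above: the PRAMIN aperture is handled in 4 KiB pages, and each page-table entry is eight bytes written as two 32-bit words through nv_wo32(), so a byte offset into the aperture converts to a 32-bit-word index with (offset >> 12) << 1. The fragment below restates that write loop with the constants spelled out; the helper name is hypothetical, and the authoritative code is the nv50_instmem_map() hunk above.

/* Illustrative restatement of the nv50 PRAMIN page-table fill:
 * one 8-byte PTE per 4 KiB page, written as two 32-bit words.
 */
static void example_fill_pramin_ptes(struct nouveau_gpuobj *pramin_pt,
                                     u32 ramin_offset, u32 bytes, u64 vram_addr)
{
        u32 pte = (ramin_offset >> 12) << 1;    /* index of first 32-bit word */
        u32 pte_end = ((bytes >> 12) << 1) + pte;
        u64 vram = vram_addr | 1;               /* low bit set, as in the patch */

        while (pte < pte_end) {
                nv_wo32(pramin_pt, (pte * 4) + 0, lower_32_bits(vram));
                nv_wo32(pramin_pt, (pte * 4) + 4, upper_32_bits(vram));
                vram += 0x1000;                 /* advance to the next 4 KiB page */
                pte += 2;
        }
}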