void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*unload_context)(struct drm_device *);
+ int (*object_new)(struct nouveau_channel *chan, u32 handle, u16 class);
void (*tlb_flush)(struct drm_device *dev);
void (*set_tile_region)(struct drm_device *dev, int i);
void (*takedown)(struct drm_device *);
int (*create_context)(struct nouveau_channel *);
void (*destroy_context)(struct nouveau_channel *);
+ int (*object_new)(struct nouveau_channel *, u32 handle, u16 class);
void (*tlb_flush)(struct drm_device *dev);
};
extern void nv04_graph_destroy_context(struct nouveau_channel *);
extern int nv04_graph_load_context(struct nouveau_channel *);
extern int nv04_graph_unload_context(struct drm_device *);
+extern int nv04_graph_object_new(struct nouveau_channel *, u32, u16);
extern int nv04_graph_mthd_page_flip(struct nouveau_channel *chan,
u32 class, u32 mthd, u32 data);
extern struct nouveau_bitfield nv04_graph_nsource[];
extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
+extern int nv40_graph_object_new(struct nouveau_channel *, u32, u16);
extern void nv40_grctx_init(struct nouveau_grctx *);
extern void nv40_graph_set_tile_region(struct drm_device *dev, int i);
extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
+extern int nv50_graph_object_new(struct nouveau_channel *, u32, u16);
extern int nv50_grctx_init(struct nouveau_grctx *);
extern void nv50_graph_tlb_flush(struct drm_device *dev);
extern void nv84_graph_tlb_flush(struct drm_device *dev);
extern void nvc0_graph_destroy_context(struct nouveau_channel *);
extern int nvc0_graph_load_context(struct nouveau_channel *);
extern int nvc0_graph_unload_context(struct drm_device *);
+extern int nvc0_graph_object_new(struct nouveau_channel *, u32, u16);
/* nv84_crypt.c */
extern int nv84_crypt_init(struct drm_device *dev);
extern int nv84_crypt_create_context(struct nouveau_channel *);
extern void nv84_crypt_destroy_context(struct nouveau_channel *);
extern void nv84_crypt_tlb_flush(struct drm_device *dev);
+extern int nv84_crypt_object_new(struct nouveau_channel *, u32, u16);
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
return 0;
}
-
-static uint32_t
-nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /*XXX: dodgy hack for now */
- if (dev_priv->card_type >= NV_50)
- return 24;
- if (dev_priv->card_type >= NV_40)
- return 32;
- return 16;
-}
-
/*
DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big.
*/
static int
-nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
- struct nouveau_gpuobj **gpuobj_ret)
+nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
struct nouveau_gpuobj *gpuobj;
+ int ret;
gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
spin_unlock(&dev_priv->ramin_lock);
- *gpuobj_ret = gpuobj;
- return 0;
+
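+	/* RAMHT holds its own reference to the object, drop ours */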
+ ret = nouveau_ramht_insert(chan, handle, gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return ret;
}
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
struct drm_device *dev = chan->dev;
struct nouveau_gpuobj_class *oc;
- struct nouveau_gpuobj *gpuobj;
int ret;
NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
found:
switch (oc->engine) {
case NVOBJ_ENGINE_SW:
- if (dev_priv->card_type < NV_C0) {
- ret = nouveau_gpuobj_sw_new(chan, class, &gpuobj);
- if (ret)
- return ret;
- goto insert;
- }
- break;
+ return nouveau_gpuobj_sw_new(chan, handle, class);
case NVOBJ_ENGINE_GR:
if ((dev_priv->card_type >= NV_20 && !chan->ramin_grctx) ||
(dev_priv->card_type < NV_20 && !chan->pgraph_ctx)) {
- struct nouveau_pgraph_engine *pgraph =
- &dev_priv->engine.graph;
-
ret = pgraph->create_context(chan);
if (ret)
return ret;
}
- break;
+
+ return pgraph->object_new(chan, handle, class);
case NVOBJ_ENGINE_CRYPT:
if (!chan->crypt_ctx) {
- struct nouveau_crypt_engine *pcrypt =
- &dev_priv->engine.crypt;
-
ret = pcrypt->create_context(chan);
if (ret)
return ret;
}
- break;
- }
-
- /* we're done if this is fermi */
- if (dev_priv->card_type >= NV_C0)
- return 0;
-
- ret = nouveau_gpuobj_new(dev, chan,
- nouveau_gpuobj_class_instmem_size(dev, class),
- 16,
- NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
- &gpuobj);
- if (ret) {
- NV_ERROR(dev, "error creating gpuobj: %d\n", ret);
- return ret;
- }
- if (dev_priv->card_type >= NV_50) {
- nv_wo32(gpuobj, 0, class);
- nv_wo32(gpuobj, 20, 0x00010000);
- } else {
- switch (class) {
- case NV_CLASS_NULL:
- nv_wo32(gpuobj, 0, 0x00001030);
- nv_wo32(gpuobj, 4, 0xFFFFFFFF);
- break;
- default:
- if (dev_priv->card_type >= NV_40) {
- nv_wo32(gpuobj, 0, class);
-#ifdef __BIG_ENDIAN
- nv_wo32(gpuobj, 8, 0x01000000);
-#endif
- } else {
-#ifdef __BIG_ENDIAN
- nv_wo32(gpuobj, 0, class | 0x00080000);
-#else
- nv_wo32(gpuobj, 0, class);
-#endif
- }
- }
+ return pcrypt->object_new(chan, handle, class);
}
- dev_priv->engine.instmem.flush(dev);
- gpuobj->engine = oc->engine;
- gpuobj->class = oc->id;
-
-insert:
- ret = nouveau_ramht_insert(chan, handle, gpuobj);
- if (ret)
- NV_ERROR(dev, "error adding gpuobj to RAMHT: %d\n", ret);
- nouveau_gpuobj_ref(NULL, &gpuobj);
- return ret;
+	BUG_ON(1);
+	return -EINVAL;
}
static int
engine->graph.destroy_context = nv04_graph_destroy_context;
engine->graph.load_context = nv04_graph_load_context;
engine->graph.unload_context = nv04_graph_unload_context;
+ engine->graph.object_new = nv04_graph_object_new;
engine->fifo.channels = 16;
engine->fifo.init = nv04_fifo_init;
engine->fifo.takedown = nv04_fifo_fini;
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv10_graph_load_context;
engine->graph.unload_context = nv10_graph_unload_context;
+ engine->graph.object_new = nv04_graph_object_new;
engine->graph.set_tile_region = nv10_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
+ engine->graph.object_new = nv04_graph_object_new;
engine->graph.set_tile_region = nv20_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->graph.destroy_context = nv20_graph_destroy_context;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
+ engine->graph.object_new = nv04_graph_object_new;
engine->graph.set_tile_region = nv20_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.unload_context = nv40_graph_unload_context;
+ engine->graph.object_new = nv40_graph_object_new;
engine->graph.set_tile_region = nv40_graph_set_tile_region;
engine->fifo.channels = 32;
engine->fifo.init = nv40_fifo_init;
engine->graph.destroy_context = nv50_graph_destroy_context;
engine->graph.load_context = nv50_graph_load_context;
engine->graph.unload_context = nv50_graph_unload_context;
+ engine->graph.object_new = nv50_graph_object_new;
if (dev_priv->chipset == 0x50 ||
dev_priv->chipset == 0xac)
engine->graph.tlb_flush = nv50_graph_tlb_flush;
engine->crypt.takedown = nv84_crypt_fini;
engine->crypt.create_context = nv84_crypt_create_context;
engine->crypt.destroy_context = nv84_crypt_destroy_context;
+ engine->crypt.object_new = nv84_crypt_object_new;
engine->crypt.tlb_flush = nv84_crypt_tlb_flush;
break;
default:
engine->graph.destroy_context = nvc0_graph_destroy_context;
engine->graph.load_context = nvc0_graph_load_context;
engine->graph.unload_context = nvc0_graph_unload_context;
+ engine->graph.object_new = nvc0_graph_object_new;
engine->fifo.channels = 128;
engine->fifo.init = nvc0_fifo_init;
engine->fifo.takedown = nvc0_fifo_takedown;
#include "nouveau_drv.h"
#include "nouveau_hw.h"
#include "nouveau_util.h"
+#include "nouveau_ramht.h"
static int nv04_graph_register(struct drm_device *dev);
static void nv04_graph_isr(struct drm_device *dev);
return 0;
}
+int
+nv04_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 1;
+ obj->class = class;
+
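+	/* pre-NV40 context objects flag big-endian mode via bit 19
+	 * (0x00080000) of the class word, as in the generic path
+	 * removed above */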
+#ifdef __BIG_ENDIAN
+ nv_wo32(obj, 0x00, 0x00080000 | class);
+#else
+ nv_wo32(obj, 0x00, class);
+#endif
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
int nv04_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_grctx.h"
+#include "nouveau_ramht.h"
static int nv40_graph_register(struct drm_device *);
static void nv40_graph_isr(struct drm_device *);
return ret;
}
+int
+nv40_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 1;
+ obj->class = class;
+
+ nv_wo32(obj, 0x00, class);
+ nv_wo32(obj, 0x04, 0x00000000);
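+	/* NV40 moved the big-endian flag to bit 24 of the word at 0x08 */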
+#ifdef __BIG_ENDIAN
+ nv_wo32(obj, 0x08, 0x01000000);
+#endif
+ nv_wo32(obj, 0x0c, 0x00000000);
+ nv_wo32(obj, 0x10, 0x00000000);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
void
nv40_graph_set_tile_region(struct drm_device *dev, int i)
{
#include "nouveau_grctx.h"
#include "nouveau_dma.h"
#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
#include "nv50_evo.h"
static int nv50_graph_register(struct drm_device *);
return 0;
}
+int
+nv50_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 1;
+ obj->class = class;
+
+ nv_wo32(obj, 0x00, class);
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
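+	/* make the object visible to the GPU before inserting it in RAMHT */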
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
static void
nv50_graph_context_switch(struct drm_device *dev)
{
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
static void nv84_crypt_isr(struct drm_device *);
atomic_dec(&chan->vm->pcrypt_refs);
}
+int
+nv84_crypt_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 5;
+ obj->class = class;
+
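+	/* crypt objects need only the class in word 0 */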
+ nv_wo32(obj, 0x00, class);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
void
nv84_crypt_tlb_flush(struct drm_device *dev)
{
return nvc0_graph_unload_context_to(dev, inst);
}
+int
+nvc0_graph_object_new(struct nouveau_channel *chan, u32 handle, u16 class)
+{
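+	/* no instmem object or RAMHT entry is needed on fermi; the old
+	 * generic path returned early here ("we're done if this is fermi") */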
+ return 0;
+}
+
static void
nvc0_graph_destroy(struct drm_device *dev)
{