/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/fifo.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/unpack.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"
#include "nouveau_vmm.h"
#include "nouveau_sched.h"

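/* Lazily allocate the per-client ABI16 state: an empty channel list plus a
 * device object targeting the client's default device.
 */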
static struct nouveau_abi16 *
nouveau_abi16(struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        if (!cli->abi16) {
                struct nouveau_abi16 *abi16;
                cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
                if (cli->abi16) {
                        struct nv_device_v0 args = {
                                .device = ~0ULL,
                        };

                        INIT_LIST_HEAD(&abi16->channels);

                        /* allocate device object targeting client's default
                         * device (ie. the one that belongs to the fd it
                         * opened)
                         */
                        if (nvif_device_ctor(&cli->base.object, "abi16Device",
                                             0, NV_DEVICE, &args, sizeof(args),
                                             &abi16->device) == 0)
                                return cli->abi16;

                        kfree(cli->abi16);
                        cli->abi16 = NULL;
                }
        }
        return cli->abi16;
}

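/* Return the client's ABI16 state (allocating it on first use) with
 * cli->mutex held; callers release the lock through nouveau_abi16_put().
 */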
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        mutex_lock(&cli->mutex);
        if (nouveau_abi16(file_priv))
                return cli->abi16;
        mutex_unlock(&cli->mutex);
        return NULL;
}

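/* Drop the lock taken by nouveau_abi16_get() and pass @ret through, which
 * keeps the "return nouveau_abi16_put(abi16, ret);" pattern below compact.
 */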
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
        struct nouveau_cli *cli = (void *)abi16->device.object.client;
        mutex_unlock(&cli->mutex);
        return ret;
}

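/* Map the device's chipset family to the software (nvsw) class exposed to
 * userspace; unknown families fall through to 0x0000.
 */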
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
        switch (drm->client.device.info.family) {
        case NV_DEVICE_INFO_V0_TNT:
                return NVIF_CLASS_SW_NV04;
        case NV_DEVICE_INFO_V0_CELSIUS:
        case NV_DEVICE_INFO_V0_KELVIN:
        case NV_DEVICE_INFO_V0_RANKINE:
        case NV_DEVICE_INFO_V0_CURIE:
                return NVIF_CLASS_SW_NV10;
        case NV_DEVICE_INFO_V0_TESLA:
                return NVIF_CLASS_SW_NV50;
        case NV_DEVICE_INFO_V0_FERMI:
        case NV_DEVICE_INFO_V0_KEPLER:
        case NV_DEVICE_INFO_V0_MAXWELL:
        case NV_DEVICE_INFO_V0_PASCAL:
        case NV_DEVICE_INFO_V0_VOLTA:
                return NVIF_CLASS_SW_GF100;
        }

        return 0x0000;
}

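/* Tear down a single notifier/engine object: destroy it, release its heap
 * node and unlink it from the channel's notifier list.
 */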
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
                        struct nouveau_abi16_ntfy *ntfy)
{
        nvif_object_dtor(&ntfy->object);
        nvkm_mm_free(&chan->heap, &ntfy->node);
        list_del(&ntfy->head);
        kfree(ntfy);
}

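/* Full teardown of an ABI16 channel: scheduler entity, notifiers, the
 * notifier buffer and finally the channel object itself.
 */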
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
                        struct nouveau_abi16_chan *chan)
{
        struct nouveau_abi16_ntfy *ntfy, *temp;

        /* When a client exits without waiting for its queued up jobs to
         * finish it might happen that we fault the channel. This is due to
         * drm_file_free() calling drm_gem_release() before the postclose()
         * callback. Hence, we can't tear down this scheduler entity before
         * uvmm mappings are unmapped. Currently, we can't detect this case.
         *
         * However, this should be rare and harmless, since the channel isn't
         * needed anymore.
         */
        nouveau_sched_entity_fini(&chan->sched_entity);

        /* wait for all activity to stop before cleaning up */
        if (chan->chan)
                nouveau_channel_idle(chan->chan);

        /* cleanup notifier state */
        list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
                nouveau_abi16_ntfy_fini(chan, ntfy);
        }

        if (chan->ntfy) {
                nouveau_vma_del(&chan->ntfy_vma);
                nouveau_bo_unpin(chan->ntfy);
                drm_gem_object_put(&chan->ntfy->bo.base);
        }

        if (chan->heap.block_size)
                nvkm_mm_fini(&chan->heap);

        /* destroy channel object, all children will be killed too */
        if (chan->chan) {
                nvif_object_dtor(&chan->ce);
                nouveau_channel_del(&chan->chan);
        }

        list_del(&chan->head);
        kfree(chan);
}

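/* Called when the client's ABI16 state is torn down (typically at file
 * close): destroy any remaining channels and the per-client device object.
 */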
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
        struct nouveau_cli *cli = (void *)abi16->device.object.client;
        struct nouveau_abi16_chan *chan, *temp;

        /* cleanup channels */
        list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
                nouveau_abi16_chan_fini(abi16, chan);
        }

        /* destroy the device object */
        nvif_device_dtor(&abi16->device);

        kfree(cli->abi16);
        cli->abi16 = NULL;
}

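/* DRM_NOUVEAU_GETPARAM: report static device/driver properties to userspace. */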
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nvif_device *device = &drm->client.device;
        struct nvkm_gr *gr = nvxx_gr(device);
        struct drm_nouveau_getparam *getparam = data;
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        switch (getparam->param) {
        case NOUVEAU_GETPARAM_CHIPSET_ID:
                getparam->value = device->info.chipset;
                break;
        case NOUVEAU_GETPARAM_PCI_VENDOR:
                if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
                        getparam->value = pdev->vendor;
                else
                        getparam->value = 0;
                break;
        case NOUVEAU_GETPARAM_PCI_DEVICE:
                if (device->info.platform != NV_DEVICE_INFO_V0_SOC)
                        getparam->value = pdev->device;
                else
                        getparam->value = 0;
                break;
        case NOUVEAU_GETPARAM_BUS_TYPE:
                switch (device->info.platform) {
                case NV_DEVICE_INFO_V0_AGP : getparam->value = 0; break;
                case NV_DEVICE_INFO_V0_PCI : getparam->value = 1; break;
                case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break;
                case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break;
                case NV_DEVICE_INFO_V0_IGP :
                        if (!pci_is_pcie(pdev))
                                getparam->value = 1;
                        else
                                getparam->value = 2;
                        break;
                default:
                        WARN_ON(1);
                        break;
                }
                break;
        case NOUVEAU_GETPARAM_FB_SIZE:
                getparam->value = drm->gem.vram_available;
                break;
        case NOUVEAU_GETPARAM_AGP_SIZE:
                getparam->value = drm->gem.gart_available;
                break;
        case NOUVEAU_GETPARAM_VM_VRAM_BASE:
                getparam->value = 0; /* deprecated */
                break;
        case NOUVEAU_GETPARAM_PTIMER_TIME:
                getparam->value = nvif_device_time(device);
                break;
        case NOUVEAU_GETPARAM_HAS_BO_USAGE:
                getparam->value = 1;
                break;
        case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
                getparam->value = 1;
                break;
        case NOUVEAU_GETPARAM_GRAPH_UNITS:
                getparam->value = nvkm_gr_units(gr);
                break;
        default:
                NV_PRINTK(dbg, cli, "unknown parameter %lld\n", getparam->param);
                return -EINVAL;
        }

        return 0;
}

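/* DRM_NOUVEAU_CHANNEL_ALLOC: create a GPU channel along with the scheduler
 * entity and notifier buffer the legacy ABI expects, and describe the
 * resulting push buffer placement back to userspace.
 */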
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
        struct drm_nouveau_channel_alloc *init = data;
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_abi16_chan *chan;
        struct nvif_device *device;
        u64 engine, runm;
        int ret;

        if (unlikely(!abi16))
                return -ENOMEM;

        if (!drm->channel)
                return nouveau_abi16_put(abi16, -ENODEV);

        /* If uvmm wasn't initialized until now disable it completely to prevent
         * userspace from mixing up UAPIs.
         *
         * The client lock is already acquired by nouveau_abi16_get().
         */
        __nouveau_cli_disable_uvmm_noinit(cli);

        device = &abi16->device;
        engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR;

        /* hack to allow channel engine type specification on kepler */
        if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
                if (init->fb_ctxdma_handle == ~0) {
                        switch (init->tt_ctxdma_handle) {
                        case 0x01: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_GR    ; break;
                        case 0x02: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPDEC; break;
                        case 0x04: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSPPP ; break;
                        case 0x08: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_MSVLD ; break;
                        case 0x30: engine = NV_DEVICE_HOST_RUNLIST_ENGINES_CE    ; break;
                        default:
                                return nouveau_abi16_put(abi16, -ENOSYS);
                        }

                        init->fb_ctxdma_handle = 0;
                        init->tt_ctxdma_handle = 0;
                }
        }

        if (engine != NV_DEVICE_HOST_RUNLIST_ENGINES_CE)
                runm = nvif_fifo_runlist(device, engine);
        else
                runm = nvif_fifo_runlist_ce(device);

        if (!runm || init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
                return nouveau_abi16_put(abi16, -EINVAL);

        /* allocate "abi16 channel" data and make up a handle for it */
        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return nouveau_abi16_put(abi16, -ENOMEM);

        INIT_LIST_HEAD(&chan->notifiers);
        list_add(&chan->head, &abi16->channels);

        /* create channel object and initialise dma and fence management */
        ret = nouveau_channel_new(drm, device, false, runm, init->fb_ctxdma_handle,
                                  init->tt_ctxdma_handle, &chan->chan);
        if (ret)
                goto done;

        ret = nouveau_sched_entity_init(&chan->sched_entity, &drm->sched,
                                        drm->sched_wq);
        if (ret)
                goto done;

        init->channel = chan->chan->chid;

        if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                                        NOUVEAU_GEM_DOMAIN_GART;
        else
        if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
        else
                init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

        if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
                init->subchan[0].handle = 0x00000000;
                init->subchan[0].grclass = 0x0000;
                init->subchan[1].handle = chan->chan->nvsw.handle;
                init->subchan[1].grclass = 0x506e;
                init->nr_subchan = 2;
        }

        /* Workaround "nvc0" gallium driver using classes it doesn't allocate on
         * Kepler and above. NVKM no longer always sets CE_CTX_VALID as part of
         * channel init, now we know what that stuff actually is.
         *
         * Doesn't matter for Kepler/Pascal, CE context stored in NV_RAMIN.
         *
         * Userspace was fixed prior to adding Ampere support.
         */
        switch (device->info.family) {
        case NV_DEVICE_INFO_V0_VOLTA:
                ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, VOLTA_DMA_COPY_A,
                                       NULL, 0, &chan->ce);
                if (ret)
                        goto done;
                break;
        case NV_DEVICE_INFO_V0_TURING:
                ret = nvif_object_ctor(&chan->chan->user, "abi16CeWar", 0, TURING_DMA_COPY_A,
                                       NULL, 0, &chan->ce);
                if (ret)
                        goto done;
                break;
        default:
                break;
        }

        /* Named memory object area */
        ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
                              0, 0, &chan->ntfy);
        if (ret == 0)
                ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
                                     false);
        if (ret)
                goto done;

        if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_vma_new(chan->ntfy, chan->chan->vmm,
                                      &chan->ntfy_vma);
                if (ret)
                        goto done;
        }

        ret = drm_gem_handle_create(file_priv, &chan->ntfy->bo.base,
                                    &init->notifier_handle);
        if (ret)
                goto done;

        ret = nvkm_mm_init(&chan->heap, 0, 0, PAGE_SIZE, 1);
done:
        if (ret)
                nouveau_abi16_chan_fini(abi16, chan);
        return nouveau_abi16_put(abi16, ret);
}

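/* Look up an ABI16 channel wrapper by its user-visible channel id. */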
static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
        struct nouveau_abi16_chan *chan;

        list_for_each_entry(chan, &abi16->channels, head) {
                if (chan->chan->chid == channel)
                        return chan;
        }

        return NULL;
}

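/* Filter NVIF ioctls forwarded from userspace: only object allocation, method
 * and sclass queries are allowed, and the request is retargeted at the channel
 * named by the token (or at the device when no token is supplied).
 */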
int
nouveau_abi16_usif(struct drm_file *file_priv, void *data, u32 size)
{
        union {
                struct nvif_ioctl_v0 v0;
        } *args = data;
        struct nouveau_abi16_chan *chan;
        struct nouveau_abi16 *abi16;
        int ret = -ENOSYS;

        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
                switch (args->v0.type) {
                case NVIF_IOCTL_V0_NEW:
                case NVIF_IOCTL_V0_MTHD:
                case NVIF_IOCTL_V0_SCLASS:
                        break;
                default:
                        return -EACCES;
                }
        } else
                return ret;

        if (!(abi16 = nouveau_abi16(file_priv)))
                return -ENOMEM;

        if (args->v0.token != ~0ULL) {
                if (!(chan = nouveau_abi16_chan(abi16, args->v0.token)))
                        return -EINVAL;
                args->v0.object = nvif_handle(&chan->chan->user);
                args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
                return 0;
        }

        args->v0.object = nvif_handle(&abi16->device.object);
        args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
        return 0;
}

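/* DRM_NOUVEAU_CHANNEL_FREE: tear down a previously allocated channel. */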
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
        struct drm_nouveau_channel_free *req = data;
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_abi16_chan *chan;

        if (unlikely(!abi16))
                return -ENOMEM;

        chan = nouveau_abi16_chan(abi16, req->channel);
        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);
        nouveau_abi16_chan_fini(abi16, chan);
        return nouveau_abi16_put(abi16, 0);
}

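/* DRM_NOUVEAU_GROBJ_ALLOC: allocate an engine object on a channel, translating
 * the legacy class numbers older userspace requests into the classes the
 * device actually exposes.
 */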
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
        struct drm_nouveau_grobj_alloc *init = data;
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_abi16_chan *chan;
        struct nouveau_abi16_ntfy *ntfy;
        struct nvif_client *client;
        struct nvif_sclass *sclass;
        s32 oclass = 0;
        int ret, i;

        if (unlikely(!abi16))
                return -ENOMEM;

        if (init->handle == ~0)
                return nouveau_abi16_put(abi16, -EINVAL);
        client = abi16->device.object.client;

        chan = nouveau_abi16_chan(abi16, init->channel);
        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);

        ret = nvif_object_sclass_get(&chan->chan->user, &sclass);
        if (ret < 0)
                return nouveau_abi16_put(abi16, ret);

        if ((init->class & 0x00ff) == 0x006e) {
                /* nvsw: compatibility with older 0x*6e class identifier */
                for (i = 0; !oclass && i < ret; i++) {
                        switch (sclass[i].oclass) {
                        case NVIF_CLASS_SW_NV04:
                        case NVIF_CLASS_SW_NV10:
                        case NVIF_CLASS_SW_NV50:
                        case NVIF_CLASS_SW_GF100:
                                oclass = sclass[i].oclass;
                                break;
                        default:
                                break;
                        }
                }
        } else
        if ((init->class & 0x00ff) == 0x00b1) {
                /* msvld: compatibility with incorrect version exposure */
                for (i = 0; i < ret; i++) {
                        if ((sclass[i].oclass & 0x00ff) == 0x00b1) {
                                oclass = sclass[i].oclass;
                                break;
                        }
                }
        } else
        if ((init->class & 0x00ff) == 0x00b2) { /* mspdec */
                /* mspdec: compatibility with incorrect version exposure */
                for (i = 0; i < ret; i++) {
                        if ((sclass[i].oclass & 0x00ff) == 0x00b2) {
                                oclass = sclass[i].oclass;
                                break;
                        }
                }
        } else
        if ((init->class & 0x00ff) == 0x00b3) { /* msppp */
                /* msppp: compatibility with incorrect version exposure */
                for (i = 0; i < ret; i++) {
                        if ((sclass[i].oclass & 0x00ff) == 0x00b3) {
                                oclass = sclass[i].oclass;
                                break;
                        }
                }
        } else {
                oclass = init->class;
        }

        nvif_object_sclass_put(&sclass);
        if (!oclass)
                return nouveau_abi16_put(abi16, -EINVAL);

        ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
        if (!ntfy)
                return nouveau_abi16_put(abi16, -ENOMEM);

        list_add(&ntfy->head, &chan->notifiers);

        client->route = NVDRM_OBJECT_ABI16;
        ret = nvif_object_ctor(&chan->chan->user, "abi16EngObj", init->handle,
                               oclass, NULL, 0, &ntfy->object);
        client->route = NVDRM_OBJECT_NVIF;

        if (ret)
                nouveau_abi16_ntfy_fini(chan, ntfy);
        return nouveau_abi16_put(abi16, ret);
}

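/* DRM_NOUVEAU_NOTIFIEROBJ_ALLOC: carve a notifier out of the channel's
 * notifier buffer and wrap it in a DMA object (pre-Fermi chipsets only).
 */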
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
        struct drm_nouveau_notifierobj_alloc *info = data;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_abi16_chan *chan;
        struct nouveau_abi16_ntfy *ntfy;
        struct nvif_device *device = &abi16->device;
        struct nvif_client *client;
        struct nv_dma_v0 args = {};
        int ret;

        if (unlikely(!abi16))
                return -ENOMEM;

        /* completely unnecessary for these chipsets... */
        if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
                return nouveau_abi16_put(abi16, -EINVAL);
        client = abi16->device.object.client;

        chan = nouveau_abi16_chan(abi16, info->channel);
        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);

        ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
        if (!ntfy)
                return nouveau_abi16_put(abi16, -ENOMEM);

        list_add(&ntfy->head, &chan->notifiers);

        ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
                           &ntfy->node);
        if (ret)
                goto done;

        args.start = ntfy->node->offset;
        args.limit = ntfy->node->offset + ntfy->node->length - 1;
        if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
                args.target = NV_DMA_V0_TARGET_VM;
                args.access = NV_DMA_V0_ACCESS_VM;
                args.start += chan->ntfy_vma->addr;
                args.limit += chan->ntfy_vma->addr;
        } else
        if (drm->agp.bridge) {
                args.target = NV_DMA_V0_TARGET_AGP;
                args.access = NV_DMA_V0_ACCESS_RDWR;
                args.start += drm->agp.base + chan->ntfy->offset;
                args.limit += drm->agp.base + chan->ntfy->offset;
        } else {
                args.target = NV_DMA_V0_TARGET_VM;
                args.access = NV_DMA_V0_ACCESS_RDWR;
                args.start += chan->ntfy->offset;
                args.limit += chan->ntfy->offset;
        }

        client->route = NVDRM_OBJECT_ABI16;
        ret = nvif_object_ctor(&chan->chan->user, "abi16Ntfy", info->handle,
                               NV_DMA_IN_MEMORY, &args, sizeof(args),
                               &ntfy->object);
        client->route = NVDRM_OBJECT_NVIF;
        if (ret)
                goto done;

        info->offset = ntfy->node->offset;
done:
        if (ret)
                nouveau_abi16_ntfy_fini(chan, ntfy);
        return nouveau_abi16_put(abi16, ret);
}

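/* DRM_NOUVEAU_GPUOBJ_FREE: destroy an object previously created via
 * GROBJ_ALLOC or NOTIFIEROBJ_ALLOC, identified by channel and handle.
 */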
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
        struct drm_nouveau_gpuobj_free *fini = data;
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_abi16_chan *chan;
        struct nouveau_abi16_ntfy *ntfy;
        int ret = -ENOENT;

        if (unlikely(!abi16))
                return -ENOMEM;

        chan = nouveau_abi16_chan(abi16, fini->channel);
        if (!chan)
                return nouveau_abi16_put(abi16, -EINVAL);

        /* synchronize with the user channel and destroy the gpu object */
        nouveau_channel_idle(chan->chan);

        list_for_each_entry(ntfy, &chan->notifiers, head) {
                if (ntfy->object.handle == fini->handle) {
                        nouveau_abi16_ntfy_fini(chan, ntfy);
                        ret = 0;
                        break;
                }
        }

        return nouveau_abi16_put(abi16, ret);
}