drm/nouveau/mmu: switch to subdev printk macros
author Ben Skeggs <bskeggs@redhat.com>
Thu, 20 Aug 2015 04:54:12 +0000 (14:54 +1000)
committer Ben Skeggs <bskeggs@redhat.com>
Fri, 28 Aug 2015 02:40:23 +0000 (12:40 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
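
This commit moves the MMU subdev from the old object-based logging helpers (nv_warn(), nv_error(), which accepted any nvkm object and resolved the owning subdev behind the macro) to the newer nvkm_* macros that take an explicit struct nvkm_subdev pointer. A minimal before/after sketch of the pattern, taken from the nv44.c hunk below:

        /* before: object-based helper, subdev resolved inside the macro */
        nv_warn(mmu, "unable to allocate dummy pages\n");

        /* after: the caller names the subdev explicitly */
        nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");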
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv44.c
index 64203ab..76e1a65 100644
@@ -187,7 +187,7 @@ nv44_mmu_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 
        mmu->nullp = pci_alloc_consistent(device->pdev, 16 * 1024, &mmu->null);
        if (!mmu->nullp) {
-               nv_warn(mmu, "unable to allocate dummy pages\n");
+               nvkm_warn(&mmu->base.subdev, "unable to allocate dummy pages\n");
                mmu->null = 0;
        }
 
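In nv50.c below, nv50_vm_flush() additionally caches the subdev pointer in a local variable, so a single handle serves both the mutex and the new logging macro, and the old nv_subdev() cast helper can go away. A condensed sketch of the resulting shape (lines as in the diff, unrelated code elided):

        struct nvkm_subdev *subdev = &mmu->subdev;      /* derived once */
        struct nvkm_device *device = subdev->device;

        mutex_lock(&subdev->mutex);                     /* was: &nv_subdev(mmu)->mutex */
        /* ... flush each engine's VM ... */
        nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
        mutex_unlock(&subdev->mutex);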
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/nv50.c
index 45b5b5a..0216c88 100644
@@ -146,14 +146,15 @@ static void
 nv50_vm_flush(struct nvkm_vm *vm)
 {
        struct nvkm_mmu *mmu = (void *)vm->mmu;
-       struct nvkm_device *device = mmu->subdev.device;
+       struct nvkm_subdev *subdev = &mmu->subdev;
+       struct nvkm_device *device = subdev->device;
        struct nvkm_bar *bar = device->bar;
        struct nvkm_engine *engine;
        int i, vme;
 
        bar->flush(bar);
 
-       mutex_lock(&nv_subdev(mmu)->mutex);
+       mutex_lock(&subdev->mutex);
        for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
                if (!atomic_read(&vm->engref[i]))
                        continue;
@@ -186,9 +187,9 @@ nv50_vm_flush(struct nvkm_vm *vm)
                        if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
                                break;
                ) < 0)
-                       nv_error(mmu, "vm flush timeout: engine %d\n", vme);
+                       nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);
        }
-       mutex_unlock(&nv_subdev(mmu)->mutex);
+       mutex_unlock(&subdev->mutex);
 }
 
 static int
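
For context, the timeout check in the second nv50.c hunk is the tail of an nvkm_msec() polling loop; the loop's opening line falls outside the hunk, so the reconstruction below is an assumption based on the usual nvkm_msec() form (poll until the condition breaks, negative return on timeout) and the 2000ms budget is a guess, not taken from this diff:

        /* poll until the flush-pending bit clears, or give up and log */
        if (nvkm_msec(device, 2000,
                if (!(nvkm_rd32(device, 0x100c80) & 0x00000001))
                        break;
        ) < 0)
                nvkm_error(subdev, "vm flush timeout: engine %d\n", vme);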