If the VMA is being deleted, we don't need to explicitly unmap it first
anymore. The MMU code will automatically merge the operations into
a single page tree walk.
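As a rough illustration of why the separate unmap pass becomes redundant
(a toy sketch only: toy_vm_put(), pte_clear_and_free() and the structs
below are hypothetical stand-ins invented for this example, not the nvkm
MMU API):

  /*
   * Toy sketch, not the nvkm implementation: all names here are
   * hypothetical stand-ins for illustration only.
   */
  #include <stddef.h>
  #include <stdint.h>

  struct toy_pte { uint64_t raw; };	/* one page-table entry */

  struct toy_vma {
  	struct toy_pte *ptes;		/* flattened "page tree" */
  	size_t npte;
  };

  /* Clear (unmap) and release an entry in the same visit. */
  static void pte_clear_and_free(struct toy_pte *pte)
  {
  	pte->raw = 0;	/* invalidate the translation */
  	/* backing page-table memory would be returned here */
  }

  /*
   * Teardown visits every entry exactly once, unmapping as it
   * frees.  An explicit unmap beforehand would walk the same
   * tree a second time for no benefit.
   */
  static void toy_vm_put(struct toy_vma *vma)
  {
  	for (size_t i = 0; i < vma->npte; i++)
  		pte_clear_and_free(&vma->ptes[i]);
  	vma->npte = 0;
  }

Dropping the nvkm_vm_unmap() calls therefore avoids a full extra
traversal of the page tree on every VMA teardown.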
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
 	if (!mem->bar_vma.node)
 		return;
-	nvkm_vm_unmap(&mem->bar_vma);
 	nvkm_vm_put(&mem->bar_vma);
 }
 
 void
 nouveau_mem_fini(struct nouveau_mem *mem)
 {
-	if (mem->vma[1].node) {
-		nvkm_vm_unmap(&mem->vma[1]);
-		nvkm_vm_put(&mem->vma[1]);
-	}
-	if (mem->vma[0].node) {
-		nvkm_vm_unmap(&mem->vma[0]);
-		nvkm_vm_put(&mem->vma[0]);
-	}
+	nvkm_vm_put(&mem->vma[1]);
+	nvkm_vm_put(&mem->vma[0]);
 }
 void
 nouveau_vma_del(struct nouveau_vma **pvma)
 {
 	struct nouveau_vma *vma = *pvma;
 	if (vma && --vma->refs <= 0) {
-		if (likely(vma->addr != ~0ULL)) {
-			nouveau_vma_unmap(vma);
+		if (likely(vma->addr != ~0ULL))
 			nvkm_vm_put(&vma->_vma);
-		}
 		list_del(&vma->head);
 		kfree(*pvma);
 		*pvma = NULL;