/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <core/device.h>
#include <core/gpuobj.h>

#include <subdev/timer.h>
#include <subdev/fb.h>
#include <subdev/vm.h>

struct nvc0_vmmgr_priv {
        struct nouveau_vmmgr base;
        spinlock_t lock;
};

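/* Write one page directory entry.  Each PDE is 8 bytes: from the writes
 * below, the low word appears to hold the large-page page table (pgt[1])
 * and the high word the small-page table (pgt[0]).  Bit 0 marks the word
 * valid, and the table address is stored shifted right by 8.
 */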
static void
nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
                struct nouveau_gpuobj *pgt[2])
{
        u32 pde[2] = { 0, 0 };

        if (pgt[0])
                pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
        if (pgt[1])
                pde[0] = 0x00000001 | (pgt[1]->addr >> 8);

        nv_wo32(pgd, (index * 8) + 0, pde[0]);
        nv_wo32(pgd, (index * 8) + 4, pde[1]);
}

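/* Encode the 64-bit PTE address field: physical address shifted right by 8,
 * bit 0 = present, bit 1 apparently flags system memory, the target
 * (aperture) goes in bits 32:35 and the memory type (storage kind) in
 * bits 36 and up.
 */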
static inline u64
nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
{
        phys >>= 8;

        phys |= 0x00000001; /* present */
        if (vma->access & NV_MEM_ACCESS_SYS)
                phys |= 0x00000002;

        phys |= ((u64)target << 32);
        phys |= ((u64)memtype << 36);
        return phys;
}

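/* Map a contiguous allocation.  "next" is the page size expressed in the
 * PTE's 256-byte address units, so each successive PTE is produced by
 * stepping the already-encoded address.  Target 0 presumably selects local
 * memory (VRAM).
 */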
static void
nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
            struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
        u32 next = 1 << (vma->node->type - 8);

        phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
        pte <<= 3;
        while (cnt--) {
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                phys += next;
                pte += 8;
        }
}

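/* Map a list of discontiguous system-memory pages.  Targets 5 and 7
 * presumably select snooped vs. non-snooped system memory, chosen by the
 * NV_MEM_ACCESS_NOSNOOP flag.
 */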
static void
nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
               struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
        u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;

        pte <<= 3;
        while (cnt--) {
                u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
                nv_wo32(pgt, pte + 0, lower_32_bits(phys));
                nv_wo32(pgt, pte + 4, upper_32_bits(phys));
                pte += 8;
        }
}

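/* Invalidate "cnt" page table entries starting at index "pte". */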
static void
nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
{
        pte <<= 3;
        while (cnt--) {
                nv_wo32(pgt, pte + 0, 0x00000000);
                nv_wo32(pgt, pte + 4, 0x00000000);
                pte += 8;
        }
}

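/* Flush VM translations for the page directory at "addr".  The register
 * sequence below (0x100c80 / 0x100cb8 / 0x100cbc) is only partially
 * understood, hence the speculative comments; priv->lock serialises
 * flushes between callers.
 */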
void
nvc0_vm_flush_engine(struct nouveau_subdev *subdev, u64 addr, int type)
{
        struct nvc0_vmmgr_priv *priv = (void *)nouveau_vmmgr(subdev);
        unsigned long flags;

        /* looks like maybe a "free flush slots" counter, the
         * faster you write to 0x100cbc the more it decreases
         */
        spin_lock_irqsave(&priv->lock, flags);
        if (!nv_wait_ne(subdev, 0x100c80, 0x00ff0000, 0x00000000)) {
                nv_error(subdev, "vm timeout 0: 0x%08x %d\n",
                         nv_rd32(subdev, 0x100c80), type);
        }

        nv_wr32(subdev, 0x100cb8, addr >> 8);
        nv_wr32(subdev, 0x100cbc, 0x80000000 | type);

        /* wait for flush to be queued? */
        if (!nv_wait(subdev, 0x100c80, 0x00008000, 0x00008000)) {
                nv_error(subdev, "vm timeout 1: 0x%08x %d\n",
                         nv_rd32(subdev, 0x100c80), type);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
}

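/* Flush translations for every page directory attached to this VM. */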
static void
nvc0_vm_flush(struct nouveau_vm *vm)
{
        struct nouveau_vm_pgd *vpgd;

        list_for_each_entry(vpgd, &vm->pgd_list, head) {
                nvc0_vm_flush_engine(nv_subdev(vm->vmm), vpgd->obj->addr, 1);
        }
}

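/* Instantiate a new address space; the 4096 here is presumably the minimum
 * block size/alignment handed down to the generic nouveau_vm_create().
 */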
static int
nvc0_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
               u64 mm_offset, struct nouveau_vm **pvm)
{
        return nouveau_vm_create(vmm, offset, length, mm_offset, 4096, pvm);
}

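/* Set up the NVC0-family VM manager: a 40-bit virtual address space with
 * 4KiB small pages (spg_shift = 12) and 128KiB large pages (lpg_shift = 17).
 * pgt_bits = 27 - 12 suggests each page table covers 128MiB of virtual
 * address space.
 */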
static int
nvc0_vmmgr_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                struct nouveau_oclass *oclass, void *data, u32 size,
                struct nouveau_object **pobject)
{
        struct nvc0_vmmgr_priv *priv;
        int ret;

        ret = nouveau_vmmgr_create(parent, engine, oclass, "VM", "vm", &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        priv->base.limit = 1ULL << 40;
        priv->base.dma_bits = 40;
        priv->base.pgt_bits = 27 - 12;
        priv->base.spg_shift = 12;
        priv->base.lpg_shift = 17;
        priv->base.create = nvc0_vm_create;
        priv->base.map_pgt = nvc0_vm_map_pgt;
        priv->base.map = nvc0_vm_map;
        priv->base.map_sg = nvc0_vm_map_sg;
        priv->base.unmap = nvc0_vm_unmap;
        priv->base.flush = nvc0_vm_flush;
        spin_lock_init(&priv->lock);
        return 0;
}

struct nouveau_oclass
nvc0_vmmgr_oclass = {
        .handle = NV_SUBDEV(VM, 0xc0),
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nvc0_vmmgr_ctor,
                .dtor = _nouveau_vmmgr_dtor,
                .init = _nouveau_vmmgr_init,
                .fini = _nouveau_vmmgr_fini,
        },
};