#include "drmP.h"
#include "nouveau_drv.h"

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)
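
/* Per-buffer state for the SGDMA TTM backend.  The ctxdma page table
 * always uses 4KiB pages, while the CPU's PAGE_SIZE may be larger, so a
 * single CPU page can occupy several consecutive PTEs.
 */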
struct nouveau_sgdma_be {
	struct drm_ttm_backend backend;
	struct drm_device *dev;

	int pages;		/* backend size in NV_CTXDMA_PAGE_SIZE units */
	int pages_populated;	/* entries of pagelist that are DMA-mapped */
	dma_addr_t *pagelist;	/* bus address of each mapped page */
	int is_bound;

	unsigned int pte_start;	/* first PTE index covered by this backend */
};

static int
nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
{
	return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}
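
/* DMA-map every page of the buffer, splitting CPU pages into
 * NV_CTXDMA_PAGE_SIZE chunks.  Holes in the page array are backed by
 * dummy_read_page so the GPU always sees a valid page.
 */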
static int
nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
		       struct page **pages, struct page *dummy_read_page)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	int p, d, o;

	DRM_DEBUG("num_pages = %lu\n", num_pages);

	nvbe->pages    = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
	nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
				   DRM_MEM_PAGES);
	if (!nvbe->pagelist)
		return -ENOMEM;

	nvbe->pages_populated = d = 0;
	for (p = 0; p < num_pages; p++) {
		for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
			struct page *page = pages[p];
			if (!page)
				page = dummy_read_page;
			nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
							 page, o,
							 NV_CTXDMA_PAGE_SIZE,
							 PCI_DMA_BIDIRECTIONAL);
			if (pci_dma_mapping_error(nvbe->pagelist[d])) {
				be->func->clear(be);
				DRM_ERROR("pci_map_page failed\n");
				return -EINVAL;
			}
			nvbe->pages_populated = ++d;
		}
	}

	return 0;
}
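
/* Undo populate(): unbind if still bound, unmap every populated page and
 * free the bus-address list.
 */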
static void
nouveau_sgdma_clear(struct drm_ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	int d;

	if (nvbe && nvbe->pagelist) {
		if (nvbe->is_bound)
			be->func->unbind(be);
		for (d = 0; d < nvbe->pages_populated; d++)
			pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
				       NV_CTXDMA_PAGE_SIZE,
				       PCI_DMA_BIDIRECTIONAL);
		drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
			 DRM_MEM_PAGES);
		nvbe->pagelist = NULL;
	}
}
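
/* Write the backend's pages into the GART page table.  mm_node->start is
 * in PAGE_SIZE units and is converted to a PTE index.  Pre-NV50 cards use
 * one 32-bit PTE per page (bus address | 3, presumably present/RW bits),
 * preceded by a two-word ctxdma header; NV50 uses two 32-bit words per
 * page with 0x21 in the low word.  For example, a buffer bound at GART
 * offset 0x10000 on a pre-NV50 card gets
 * pte_start = (0x10000 >> 12) + 2 = 0x12.
 */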
static int
nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
	uint32_t i;

	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
		  offset, !!(mem->flags & DRM_BO_FLAG_CACHED));

	if (offset & NV_CTXDMA_PAGE_MASK)
		return -EINVAL;
	nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50)
		nvbe->pte_start += 2; /* skip ctxdma header */

	for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
		uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];

		/* Bus addresses must be aligned to the ctxdma page size. */
		if (pteval & NV_CTXDMA_PAGE_MASK) {
			DRM_ERROR("Bad pteval 0x%llx\n", pteval);
			return -EINVAL;
		}

		if (dev_priv->card_type < NV_50) {
			INSTANCE_WR(gpuobj, i, pteval | 3);
		} else {
			INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
			INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
		}
	}

	nvbe->is_bound = 1;
	return 0;
}
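
/* Unbind does not simply clear the PTEs: each one is pointed back at the
 * shared dummy page so the aperture never contains stale bus addresses.
 */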
static int
nouveau_sgdma_unbind(struct drm_ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

	if (nvbe->is_bound) {
		struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
		unsigned int pte;

		pte = nvbe->pte_start;
		while (pte < (nvbe->pte_start + nvbe->pages)) {
			uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;

			if (dev_priv->card_type < NV_50) {
				INSTANCE_WR(gpuobj, pte, pteval | 3);
			} else {
				INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
				INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
			}
			pte++;
		}

		nvbe->is_bound = 0;
	}

	return 0;
}

static void
nouveau_sgdma_destroy(struct drm_ttm_backend *be)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

	if (nvbe) {
		if (nvbe->pagelist)
			be->func->clear(be);
		drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
	}
}

static struct drm_ttm_backend_func nouveau_sgdma_backend = {
	.needs_ub_cache_adjust	= nouveau_sgdma_needs_ub_cache_adjust,
	.populate		= nouveau_sgdma_populate,
	.clear			= nouveau_sgdma_clear,
	.bind			= nouveau_sgdma_bind,
	.unbind			= nouveau_sgdma_unbind,
	.destroy		= nouveau_sgdma_destroy
};
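
/* Create a backend instance.  Returns NULL if the shared ctxdma object
 * has not been set up by nouveau_sgdma_init() yet.
 */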
struct drm_ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_sgdma_be *nvbe;

	if (!dev_priv->gart_info.sg_ctxdma)
		return NULL;

	nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
	if (!nvbe)
		return NULL;

	nvbe->dev = dev;
	nvbe->backend.func = &nouveau_sgdma_backend;
	return &nvbe->backend;
}
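
/* Size the GART page table.  Pre-NV50: a 64MiB aperture with 4 bytes per
 * PTE plus an 8-byte ctxdma header, i.e.
 * obj_size = (64MiB >> 12) * 4 + 8 = 65544 bytes.  NV50: one entire VM
 * page table covering 512MiB at 8 bytes per PTE.
 */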
int
nouveau_sgdma_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	uint32_t aper_size, obj_size;
	int i, ret;

	if (dev_priv->card_type < NV_50) {
		aper_size = (64 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
		obj_size += 8; /* ctxdma header */
	} else {
		/* 1 entire VM page table */
		aper_size = (512 * 1024 * 1024);
		obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
	}

	if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
				      NVOBJ_FLAG_ALLOW_NO_REFS |
				      NVOBJ_FLAG_ZERO_ALLOC |
				      NVOBJ_FLAG_ZERO_FREE, &gpuobj))) {
		DRM_ERROR("Error creating sgdma object: %d\n", ret);
		return ret;
	}

	/* All PTEs initially point at one dummy page, so stray GPU reads
	 * and writes never hit arbitrary system memory.
	 */
	dev_priv->gart_info.sg_dummy_page =
		alloc_page(GFP_KERNEL|__GFP_DMA32);
	SetPageLocked(dev_priv->gart_info.sg_dummy_page);
	dev_priv->gart_info.sg_dummy_bus =
		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	if (dev_priv->card_type < NV_50) {
		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
		 * it's confirmed to work on c51.  Perhaps this means
		 * NV_DMA_TARGET_PCIE on those cards?
		 */
		INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
				       (1 << 12) /* PT present */ |
				       (0 << 13) /* PT *not* linear */ |
				       (NV_DMA_ACCESS_RW  << 14) |
				       (NV_DMA_TARGET_PCI << 16));
		INSTANCE_WR(gpuobj, 1, aper_size - 1);
		for (i = 2; i < 2 + (aper_size >> 12); i++) {
			INSTANCE_WR(gpuobj, i,
				    dev_priv->gart_info.sg_dummy_bus | 3);
		}
	} else {
		for (i = 0; i < obj_size; i += 8) {
			INSTANCE_WR(gpuobj, (i+0)/4,
				    dev_priv->gart_info.sg_dummy_bus | 0x21);
			INSTANCE_WR(gpuobj, (i+4)/4, 0);
		}
	}

	dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
	dev_priv->gart_info.aper_base = 0;
	dev_priv->gart_info.aper_size = aper_size;
	dev_priv->gart_info.sg_ctxdma = gpuobj;
	return 0;
}
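
/* Tear down in reverse order: unmap and free the dummy page, then destroy
 * the ctxdma object.
 */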
void
nouveau_sgdma_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->gart_info.sg_dummy_page) {
		/* Unmap with the same size passed to pci_map_page(). */
		pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		unlock_page(dev_priv->gart_info.sg_dummy_page);
		__free_page(dev_priv->gart_info.sg_dummy_page);
		dev_priv->gart_info.sg_dummy_page = NULL;
		dev_priv->gart_info.sg_dummy_bus  = 0;
	}

	nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}
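
/* Pre-TTM bring-up path: allocate a scatter-gather area covering the
 * (shrunken) aperture and bind it at offset 0, making the whole GART
 * usable without the buffer-object manager.
 */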
int
nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_ttm_backend *be;
	struct drm_scatter_gather sgreq;
	struct drm_mm_node mm_node;
	struct drm_bo_mem_reg mem;
	int ret;

	dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
	if (!dev_priv->gart_info.sg_be)
		return -ENOMEM;
	be = dev_priv->gart_info.sg_be;

	/* Hack the aperture size down to the amount of system memory
	 * we're going to bind into it.
	 */
	if (dev_priv->gart_info.aper_size > 32*1024*1024)
		dev_priv->gart_info.aper_size = 32*1024*1024;

	sgreq.size = dev_priv->gart_info.aper_size;
	if ((ret = drm_sg_alloc(dev, &sgreq))) {
		DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
		return ret;
	}
	dev_priv->gart_info.sg_handle = sgreq.handle;

	if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist,
				      dev->bm.dummy_read_page))) {
		DRM_ERROR("failed populate: %d\n", ret);
		return ret;
	}

	mm_node.start = 0;
	mem.mm_node = &mm_node;
	mem.flags = 0;

	if ((ret = be->func->bind(be, &mem))) {
		DRM_ERROR("failed bind: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
{
}
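
/* Read a PTE back out of the ctxdma object to recover the bus address
 * mapped at a given GART offset (skipping the two-word header on pre-NV50
 * cards).
 */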
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
	int pte;

	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
	if (dev_priv->card_type < NV_50) {
		*page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
		return 0;
	}

	DRM_ERROR("Unimplemented on NV50\n");
	return -EINVAL;
}