/* linux-core/nouveau_sgdma.c */
#include "drmP.h"
#include "nouveau_drv.h"

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_SIZE  (1 << NV_CTXDMA_PAGE_SHIFT)
#define NV_CTXDMA_PAGE_MASK  (NV_CTXDMA_PAGE_SIZE - 1)

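/* Per-backend state: the DMA address of each NV_CTXDMA_PAGE_SIZE chunk
 * backing a buffer, its population/bind status, and the first PTE the
 * buffer occupies in the SGDMA ctxdma once bound.
 */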
struct nouveau_sgdma_be {
        struct drm_ttm_backend backend;
        struct drm_device *dev;

        int         pages;
        int         pages_populated;
        dma_addr_t *pagelist;
        int         is_bound;

        unsigned int pte_start;
};

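/* TTM uses this to decide whether caching attributes must be adjusted
 * before binding; we only skip the adjustment when the backend will be
 * bound cached.
 */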
static int
nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
{
        return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}

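/* Map each backing page for PCI DMA, NV_CTXDMA_PAGE_SIZE bytes at a
 * time.  A kernel page may be larger than a ctxdma page, so one struct
 * page can yield several pagelist entries; missing pages fall back to
 * the dummy read page.
 */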
static int
nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
                       struct page **pages, struct page *dummy_read_page)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        int p, d, o;

        DRM_DEBUG("num_pages = %lu\n", num_pages);

        if (nvbe->pagelist)
                return -EINVAL;
        nvbe->pages    = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
        nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
                                   DRM_MEM_PAGES);
        if (!nvbe->pagelist)
                return -ENOMEM;

        nvbe->pages_populated = d = 0;
        for (p = 0; p < num_pages; p++) {
                for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
                        struct page *page = pages[p];
                        if (!page)
                                page = dummy_read_page;
                        nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
                                                         page, o,
                                                         NV_CTXDMA_PAGE_SIZE,
                                                         PCI_DMA_BIDIRECTIONAL);
                        if (pci_dma_mapping_error(nvbe->pagelist[d])) {
                                be->func->clear(be);
                                DRM_ERROR("pci_map_page failed\n");
                                return -EINVAL;
                        }
                        nvbe->pages_populated = ++d;
                }
        }

        return 0;
}

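/* Undo populate: unbind if still bound, unmap every DMA mapping we made,
 * and release the pagelist so the backend can be repopulated.
 */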
static void
nouveau_sgdma_clear(struct drm_ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        int d;

        DRM_DEBUG("\n");

        if (nvbe && nvbe->pagelist) {
                if (nvbe->is_bound)
                        be->func->unbind(be);

                for (d = 0; d < nvbe->pages_populated; d++) {
                        pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
                                       NV_CTXDMA_PAGE_SIZE,
                                       PCI_DMA_BIDIRECTIONAL);
                }
                drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
                         DRM_MEM_PAGES);
                nvbe->pagelist = NULL;
        }
}

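/* Write the buffer's DMA addresses into the SGDMA ctxdma's page table,
 * starting at the PTE corresponding to its offset within the GART
 * aperture.  Pre-NV50 PTEs are one word; NV50 PTEs are two.
 */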
static int
nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
        uint32_t i;

        DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
                  offset, !!(mem->flags & DRM_BO_FLAG_CACHED));

        if (offset & NV_CTXDMA_PAGE_MASK)
                return -EINVAL;
        nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
        if (dev_priv->card_type < NV_50)
                nvbe->pte_start += 2; /* skip ctxdma header */

        for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
                uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];

                if (pteval & NV_CTXDMA_PAGE_MASK) {
                        DRM_ERROR("Bad pteval 0x%llx\n", pteval);
                        return -EINVAL;
                }

                if (dev_priv->card_type < NV_50) {
                        INSTANCE_WR(gpuobj, i, pteval | 3);
                } else {
                        INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
                        INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
                }
        }

        nvbe->is_bound = 1;
        return 0;
}

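/* Point every PTE the buffer occupied back at the dummy page so the GPU
 * never walks a stale translation.
 */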
static int
nouveau_sgdma_unbind(struct drm_ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
        struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;

        DRM_DEBUG("\n");

        if (nvbe->is_bound) {
                struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
                unsigned int pte;

                pte = nvbe->pte_start;
                while (pte < (nvbe->pte_start + nvbe->pages)) {
                        uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;

                        if (dev_priv->card_type < NV_50) {
                                INSTANCE_WR(gpuobj, pte, pteval | 3);
                        } else {
                                INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
                                INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
                        }

                        pte++;
                }

                nvbe->is_bound = 0;
        }

        return 0;
}

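/* Final teardown of a backend instance: clear any remaining mappings,
 * then free the backend structure itself.
 */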
static void
nouveau_sgdma_destroy(struct drm_ttm_backend *be)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;

        DRM_DEBUG("\n");
        if (nvbe) {
                if (nvbe->pagelist)
                        be->func->clear(be);
                drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
        }
}

static struct drm_ttm_backend_func nouveau_sgdma_backend = {
        .needs_ub_cache_adjust  = nouveau_sgdma_needs_ub_cache_adjust,
        .populate               = nouveau_sgdma_populate,
        .clear                  = nouveau_sgdma_clear,
        .bind                   = nouveau_sgdma_bind,
        .unbind                 = nouveau_sgdma_unbind,
        .destroy                = nouveau_sgdma_destroy
};

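/* Allocate a TTM backend instance that translates TTM populate/bind
 * calls into SGDMA page-table updates.  Requires nouveau_sgdma_init()
 * to have created the ctxdma first.
 */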
struct drm_ttm_backend *
nouveau_sgdma_init_ttm(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_sgdma_be *nvbe;

        if (!dev_priv->gart_info.sg_ctxdma)
                return NULL;

        nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
        if (!nvbe)
                return NULL;

        nvbe->dev = dev;
        nvbe->backend.func = &nouveau_sgdma_backend;

        return &nvbe->backend;
}

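/* Create the SGDMA ctxdma: a GPU object holding the page table for the
 * fake GART aperture (64MiB pre-NV50, 512MiB on NV50), with every PTE
 * initially pointing at a DMA-mapped dummy page.
 */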
int
nouveau_sgdma_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        uint32_t aper_size, obj_size;
        int i, ret;

        if (dev_priv->card_type < NV_50) {
                aper_size = (64 * 1024 * 1024);
                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
                obj_size += 8; /* ctxdma header */
        } else {
                /* 1 entire VM page table */
                aper_size = (512 * 1024 * 1024);
                obj_size  = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
        }

        if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
                                      NVOBJ_FLAG_ALLOW_NO_REFS |
                                      NVOBJ_FLAG_ZERO_ALLOC |
                                      NVOBJ_FLAG_ZERO_FREE, &gpuobj))) {
                DRM_ERROR("Error creating sgdma object: %d\n", ret);
                return ret;
        }

        dev_priv->gart_info.sg_dummy_page =
                alloc_page(GFP_KERNEL|__GFP_DMA32);
        if (!dev_priv->gart_info.sg_dummy_page) {
                nouveau_gpuobj_del(dev, &gpuobj);
                return -ENOMEM;
        }
        SetPageLocked(dev_priv->gart_info.sg_dummy_page);
        dev_priv->gart_info.sg_dummy_bus =
                pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
                             PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

        if (dev_priv->card_type < NV_50) {
                /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this,
                 * and it's confirmed to work on C51.  Perhaps that means
                 * NV_DMA_TARGET_PCIE on those cards? */
                INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                       (1 << 12) /* PT present */ |
                                       (0 << 13) /* PT *not* linear */ |
                                       (NV_DMA_ACCESS_RW  << 14) |
                                       (NV_DMA_TARGET_PCI << 16));
                INSTANCE_WR(gpuobj, 1, aper_size - 1);
                for (i = 2; i < 2 + (aper_size >> 12); i++) {
                        INSTANCE_WR(gpuobj, i,
                                    dev_priv->gart_info.sg_dummy_bus | 3);
                }
        } else {
                for (i = 0; i < obj_size; i += 8) {
                        INSTANCE_WR(gpuobj, (i+0)/4,
                                    dev_priv->gart_info.sg_dummy_bus | 0x21);
                        INSTANCE_WR(gpuobj, (i+4)/4, 0);
                }
        }

        dev_priv->gart_info.type      = NOUVEAU_GART_SGDMA;
        dev_priv->gart_info.aper_base = 0;
        dev_priv->gart_info.aper_size = aper_size;
        dev_priv->gart_info.sg_ctxdma = gpuobj;
        return 0;
}

void
nouveau_sgdma_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->gart_info.sg_dummy_page) {
                /* Unmap with the same size the page was mapped with. */
                pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                unlock_page(dev_priv->gart_info.sg_dummy_page);
                __free_page(dev_priv->gart_info.sg_dummy_page);
                dev_priv->gart_info.sg_dummy_page = NULL;
                dev_priv->gart_info.sg_dummy_bus = 0;
        }

        nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
}

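/* Interim path for running without TTM buffer management: allocate a
 * scatter-gather area covering the (clamped-down) aperture and bind it
 * permanently at offset 0.
 */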
int
nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_ttm_backend *be;
        struct drm_scatter_gather sgreq;
        struct drm_memrange_node mm_node;
        struct drm_bo_mem_reg mem;
        int ret;

        dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
        if (!dev_priv->gart_info.sg_be)
                return -ENOMEM;
        be = dev_priv->gart_info.sg_be;

        /* Hack the aperture size down to the amount of system memory
         * we're going to bind into it.
         */
        if (dev_priv->gart_info.aper_size > 32*1024*1024)
                dev_priv->gart_info.aper_size = 32*1024*1024;

        sgreq.size = dev_priv->gart_info.aper_size;
        if ((ret = drm_sg_alloc(dev, &sgreq))) {
                DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
                return ret;
        }
        dev_priv->gart_info.sg_handle = sgreq.handle;

        if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist,
                                      dev->bm.dummy_read_page))) {
                DRM_ERROR("failed populate: %d\n", ret);
                return ret;
        }

        /* Bind at aperture offset 0; zero mem so bind doesn't read
         * uninitialized flags. */
        memset(&mem, 0, sizeof(mem));
        mm_node.start = 0;
        mem.mm_node = &mm_node;

        if ((ret = be->func->bind(be, &mem))) {
                DRM_ERROR("failed bind: %d\n", ret);
                return ret;
        }

        return 0;
}

void
nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
{
}

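/* Translate an offset within the SGDMA aperture back to the bus address
 * of its backing page by reading the corresponding PTE.  Only wired up
 * for the pre-NV50 layout so far.
 */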
int
nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
        int pte;

        pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
        if (dev_priv->card_type < NV_50) {
                *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
                return 0;
        }

        DRM_ERROR("Unimplemented on NV50\n");
        return -EINVAL;
}