// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

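/*
 * Allocate a single page table together with the 4K object that backs it.
 * Returns an ERR_PTR on allocation failure.
 */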
struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
        struct i915_page_table *pt;

        pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pt))
                return ERR_PTR(-ENOMEM);

        pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
        if (IS_ERR(pt->base)) {
                kfree(pt);
                return ERR_PTR(-ENOMEM);
        }

        atomic_set(&pt->used, 0);
        return pt;
}

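/*
 * Allocate a page-directory shell with room for @count child pointers; the
 * backing page is allocated separately by the caller (see alloc_pd()).
 */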
struct i915_page_directory *__alloc_pd(int count)
{
        struct i915_page_directory *pd;

        pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd))
                return NULL;

        pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
        if (unlikely(!pd->entry)) {
                kfree(pd);
                return NULL;
        }

        spin_lock_init(&pd->lock);
        return pd;
}

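/*
 * Allocate a full page directory: an I915_PDES-entry shell plus the 4K
 * page into which the encoded entries are written.
 */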
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
        struct i915_page_directory *pd;

        pd = __alloc_pd(I915_PDES);
        if (unlikely(!pd))
                return ERR_PTR(-ENOMEM);

        pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
        if (IS_ERR(pd->pt.base)) {
                kfree(pd->entry);
                kfree(pd);
                return ERR_PTR(-ENOMEM);
        }

        return pd;
}

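/*
 * Free a page table or page directory. A directory embeds its page table
 * at offset 0 (asserted below), so both are released through this one
 * helper; @lvl > 0 marks a directory, which also owns an entry array.
 */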
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
        BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));

        if (lvl) {
                struct i915_page_directory *pd =
                        container_of(pt, typeof(*pd), pt);
                kfree(pd->entry);
        }

        if (pt->base)
                i915_gem_object_put(pt->base);

        kfree(pt);
}

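/* Write one encoded descriptor into the directory page and flush the cacheline. */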
static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
                const unsigned short idx,
                const u64 encoded_entry)
{
        u64 * const vaddr = kmap_atomic(__px_page(pdma));

        vaddr[idx] = encoded_entry;
        clflush_cache_range(&vaddr[idx], sizeof(u64));
        kunmap_atomic(vaddr);
}

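/*
 * Install @to at slot @idx: take a reference on @pd, record the CPU
 * pointer and write the encoded DMA address into the directory page.
 */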
void
__set_pd_entry(struct i915_page_directory * const pd,
               const unsigned short idx,
               struct i915_page_table * const to,
               u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
        /* Each thread pre-pins the pd, and we may have a thread per pde. */
        GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

        atomic_inc(px_used(pd));
        pd->entry[idx] = to;
        write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}

void
clear_pd_entry(struct i915_page_directory * const pd,
               const unsigned short idx,
               const struct drm_i915_gem_object * const scratch)
{
        GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

        write_dma_entry(px_base(pd), idx, scratch->encode);
        pd->entry[idx] = NULL;
        atomic_dec(px_used(pd));
}

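/*
 * Drop a reference on @pt and, if it was the last one, clear its slot in
 * @pd under the directory lock. Returns true if the caller may free @pt.
 */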
bool
release_pd_entry(struct i915_page_directory * const pd,
                 const unsigned short idx,
                 struct i915_page_table * const pt,
                 const struct drm_i915_gem_object * const scratch)
{
        bool free = false;

        if (atomic_add_unless(&pt->used, -1, 1))
                return false;

        spin_lock(&pd->lock);
        if (atomic_dec_and_test(&pt->used)) {
                clear_pd_entry(pd, idx, scratch);
                free = true;
        }
        spin_unlock(&pd->lock);

        return free;
}

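/* Apply the GTT workarounds and enable per-process GTT on gen6/gen7. */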
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        gtt_write_workarounds(gt);

        if (IS_GEN(i915, 6))
                gen6_ppgtt_enable(gt);
        else if (IS_GEN(i915, 7))
                gen7_ppgtt_enable(gt);

        return 0;
}

static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt)
{
        if (INTEL_GEN(gt->i915) < 8)
                return gen6_ppgtt_create(gt);
        else
                return gen8_ppgtt_create(gt);
}

struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
{
        struct i915_ppgtt *ppgtt;

        ppgtt = __ppgtt_create(gt);
        if (IS_ERR(ppgtt))
                return ppgtt;

        trace_i915_ppgtt_create(&ppgtt->vm);

        return ppgtt;
}

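/*
 * Bind a vma into the ppgtt: allocate the page tables covering the node on
 * first bind, then write the PTEs, marking read-only objects as such.
 */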
void ppgtt_bind_vma(struct i915_address_space *vm,
                    struct i915_vm_pt_stash *stash,
                    struct i915_vma *vma,
                    enum i915_cache_level cache_level,
                    u32 flags)
{
        u32 pte_flags;

        if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
                vm->allocate_va_range(vm, stash, vma->node.start, vma->size);
                set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
        }

        /* Applicable to VLV, and gen8+ */
        pte_flags = 0;
        if (i915_gem_object_is_readonly(vma->obj))
                pte_flags |= PTE_READ_ONLY;

        vm->insert_entries(vm, vma, cache_level, pte_flags);
        wmb();
}

void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
        if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
                vm->clear_range(vm, vma->node.start, vma->size);
}

static unsigned long pd_count(u64 size, int shift)
{
        /* Beware later misalignment */
        return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}

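/*
 * Preallocate enough page tables and directories to map @size bytes and
 * chain them onto @stash for a later allocate_va_range() to consume.
 */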
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
                           struct i915_vm_pt_stash *stash,
                           u64 size)
{
        unsigned long count;
        int shift, n;

        shift = vm->pd_shift;
        if (!shift)
                return 0;

        count = pd_count(size, shift);
        while (count--) {
                struct i915_page_table *pt;

                pt = alloc_pt(vm);
                if (IS_ERR(pt)) {
                        i915_vm_free_pt_stash(vm, stash);
                        return PTR_ERR(pt);
                }

                pt->stash = stash->pt[0];
                stash->pt[0] = pt;
        }

        for (n = 1; n < vm->top; n++) {
                shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
                count = pd_count(size, shift);
                while (count--) {
                        struct i915_page_directory *pd;

                        pd = alloc_pd(vm);
                        if (IS_ERR(pd)) {
                                i915_vm_free_pt_stash(vm, stash);
                                return PTR_ERR(pd);
                        }

                        pd->pt.stash = stash->pt[1];
                        stash->pt[1] = &pd->pt;
                }
        }

        return 0;
}

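/* Pin the backing pages of every table and directory held in @stash. */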
int i915_vm_pin_pt_stash(struct i915_address_space *vm,
                         struct i915_vm_pt_stash *stash)
{
        struct i915_page_table *pt;
        int n, err;

        for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
                for (pt = stash->pt[n]; pt; pt = pt->stash) {
                        err = pin_pt_dma(vm, pt->base);
                        if (err)
                                return err;
                }
        }

        return 0;
}

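/* Release every table and directory still held in @stash. */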
void i915_vm_free_pt_stash(struct i915_address_space *vm,
                           struct i915_vm_pt_stash *stash)
{
        struct i915_page_table *pt;
        int n;

        for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
                while ((pt = stash->pt[n])) {
                        stash->pt[n] = pt->stash;
                        free_px(vm, pt, n);
                }
        }
}

int ppgtt_set_pages(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->pages);

        vma->pages = vma->obj->mm.pages;
        vma->page_sizes = vma->obj->mm.page_sizes;

        return 0;
}

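/*
 * Common ppgtt setup: attach the address space to its GT, size it from the
 * platform's ppgtt_size and install the ppgtt vma operations.
 */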
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        ppgtt->vm.gt = gt;
        ppgtt->vm.i915 = i915;
        ppgtt->vm.dma = i915->drm.dev;
        ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

        i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

        ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
        ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
        ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
        ppgtt->vm.vma_ops.clear_pages = clear_pages;
}