/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

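/*
 * Illustrative usage sketch (not part of the driver): the doomed-aware
 * reference is meant to be taken while the lookup structure is protected,
 * so that a resource already in teardown is skipped rather than
 * resurrected:
 *
 *      read_lock(&dev_priv->resource_lock);
 *      res = vmw_resource_reference_unless_doomed(res);
 *      read_unlock(&dev_priv->resource_lock);
 *      if (res) {
 *              ... use the resource ...
 *              vmw_resource_unreference(&res);
 *      }
 */
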
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.shared = false;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        write_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

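/*
 * Illustrative sketch (my_res_free, my_func and my_hw_destroy are
 * hypothetical names, not taken from this file): a per-type resource
 * implementation typically pairs vmw_resource_init() with
 * vmw_resource_activate() once the hardware object exists:
 *
 *      ret = vmw_resource_init(dev_priv, res, false, my_res_free, &my_func);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ... emit the FIFO command that creates the hardware object ...
 *      vmw_resource_activate(res, my_hw_destroy);
 */
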
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that resource lookups will be able to
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

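/*
 * Usage sketch (illustrative only): an ioctl path looks a resource up by
 * handle and drops the acquired reference when done:
 *
 *      ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                            converter, &res);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ... operate on the refcounted resource ...
 *      vmw_resource_unreference(&res);
 */
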
/**
 * vmw_user_lookup_handle - Look up either a surface or a dmabuf from a handle.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL on entry;
 * exactly one of them is set on success.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
        return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                                  bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

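/*
 * Worked example (assuming 4 KiB pages on a 64-bit kernel; the numbers are
 * illustrative): a 1 MiB user buffer gives num_pages = 256, so
 * page_array_size = ttm_round_pot(256 * sizeof(void *)) = 2048 bytes.
 * With vmw_dma_alloc_coherent, another
 * ttm_round_pot(256 * sizeof(dma_addr_t)) = 2048 bytes is added, all on
 * top of user_struct_size.
 */
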
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_dma_buffer_unmap(vmw_bo);
        kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        vmw_dma_buffer_unmap(&vmw_user_bo->dma);
        ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_dmabuf_destroy);

        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible, acc_size,
                          NULL, NULL, bo_free);
        return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
                                            enum ttm_ref_type ref_type)
{
        struct vmw_user_dma_buffer *user_bo;

        user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->dma.base);
                break;
        default:
                BUG();
        }
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf,
                          struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              (dev_priv->has_mob) ?
                              &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release,
                                    &vmw_user_dmabuf_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
                                        struct ttm_object_file *tfile,
                                        uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->dma.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
                                                           nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->dma.base);

        return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
                                           struct ttm_object_file *tfile,
                                           uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
                                             &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
                                       dma);
                ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_dmabuf_unreference(&dma_buf);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
                                                      arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

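/*
 * Userspace-side sketch (illustrative only; the libdrm wrapper used by a
 * real client is an assumption here): the expected call sequence around
 * CPU access of a buffer is grab, access, release:
 *
 *      struct drm_vmw_synccpu_arg arg = {
 *              .op = drm_vmw_synccpu_grab,
 *              .handle = handle,
 *              .flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write,
 *      };
 *      drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *      ... mmap and access the buffer ...
 *      arg.op = drm_vmw_synccpu_release;
 *      drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */
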
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf,
                                    NULL);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out,
                           struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                pr_err("Invalid buffer object handle 0x%08lx\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                pr_err("Invalid buffer object handle 0x%08lx\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf,
                              uint32_t *handle)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

        *handle = user_bo->prime.base.hash.key;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_dma_buffer *dma_buf;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
                                    &dma_buf, NULL);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

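/*
 * Worked example (illustrative): a 1024x768, 32 bpp dumb buffer request
 * yields pitch = 1024 * 4 = 4096 bytes and size = 4096 * 768 = 3 MiB,
 * which is then handed to vmw_user_dmabuf_alloc() like any other user
 * dma buffer.
 */
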
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool switch_backup,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_dmabuf_reference(new_backup);
                        lockdep_assert_held(&new_backup->base.resv->lock.base);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
                }
        }
        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed interruptibly.
 * @no_backup:      Whether no backup buffer is needed for this resource.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes.\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptibly.
 */
static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.shared = false;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.shared = false;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

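/*
 * Illustrative call sequence (a sketch of how a command-submission path
 * drives validation; not a verbatim copy of the execbuf code):
 *
 *      ret = vmw_resource_reserve(res, true, false);
 *      ... reserve and validate res->backup through TTM ...
 *      ret = vmw_resource_validate(res);
 *      ... submit commands, fence the backup buffer ...
 *      vmw_resource_unreserve(res, false, NULL, 0);
 */
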
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                dma_fence_put(&fence->base);
        } else
                reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
        struct vmw_dma_buffer *dma_buf;

        if (mem == NULL)
                return;

        if (bo->destroy != vmw_dmabuf_bo_free &&
            bo->destroy != vmw_user_dmabuf_destroy)
                return;

        dma_buf = container_of(bo, struct vmw_dma_buffer, base);

        /*
         * Kill any cached kernel maps before move. An optimization could
         * be to do this iff source or destination memory type is VRAM.
         */
        vmw_dma_buffer_unmap(dma_buf);

        if (mem->mem_type != VMW_PL_MOB) {
                struct vmw_resource *res, *n;
                struct ttm_validate_buffer val_buf;

                val_buf.bo = bo;
                val_buf.shared = false;

                list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

                        if (unlikely(res->func->unbind == NULL))
                                continue;

                        (void) res->func->unbind(res, true, &val_buf);
                        res->backup_dirty = true;
                        res->res_dirty = false;
                        list_del_init(&res->mob_head);
                }

                (void) ttm_bo_wait(bo, false, false);
        }
}

/**
 * vmw_resource_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
{
        if (bo->destroy != vmw_dmabuf_bo_free &&
            bo->destroy != vmw_user_dmabuf_destroy)
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes that binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv     = dx_query_ctx->dev_priv;

        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for "
                          "query MOB read back.\n");
                return -ENOMEM;
        }

        cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid    = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time the affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_dma_buffer *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_dma_buffer, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_fence_single_bo(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else
                mutex_unlock(&dev_priv->binding_mutex);
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether any sleeps should be performed interruptibly
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_dma_buffer *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_dma_buffer *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}

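/*
 * Usage sketch (illustrative only): pin and unpin calls must balance.
 * A caller that needs a stable resource id, e.g. for scanout, would do:
 *
 *      ret = vmw_resource_pin(res, true);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ... the id is stable and any backup MOB stays resident ...
 *      vmw_resource_unpin(res);
 */
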
/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}