drm/vmwgfx: Make surfaces prime-aware
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_base_object base;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}


/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, 0);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}
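
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * callers pair vmw_resource_reference() with vmw_resource_unreference(),
 * which also clears the caller's pointer:
 *
 *      struct vmw_resource *tmp = vmw_resource_reference(res);
 *      ... use tmp ...
 *      vmw_resource_unreference(&tmp);  (tmp is now NULL)
 *
 * Note that vmw_resource_release() only ever runs via kref_put() with
 * dev_priv->resource_lock held, which is why it unlocks early and
 * re-takes the lock before removing the idr entry.
 */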

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}
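
/*
 * Added note (not part of the original file): the idr_preload() /
 * idr_alloc(..., GFP_NOWAIT) / idr_preload_end() sequence above is the
 * standard pattern for allocating an idr entry while holding a spinning
 * lock: the sleeping GFP_KERNEL preallocation happens before
 * resource_lock is taken, so the allocation inside the critical section
 * never needs to sleep.
 */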

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}
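
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a typical caller converts a user-space handle into a refcounted
 * resource and drops the reference when done; the handle variable here
 * is hypothetical:
 *
 *      struct vmw_resource *res;
 *      int ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                                user_surface_converter,
 *                                                &res);
 *      if (ret == 0) {
 *              ... use res ...
 *              vmw_resource_unreference(&res);
 *      }
 */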

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/**
 * Buffer management.
 */
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, bo_free);
        return ret;
}
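
/*
 * Added note (not part of the original file): on failure ttm_bo_init()
 * destroys the embedded buffer object through the supplied bo_free
 * callback, so callers of vmw_dmabuf_init() must not free @vmw_bo again
 * on error. This is why vmw_user_dmabuf_alloc() below returns the error
 * code directly without a kfree().
 */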

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_base_object_kfree(vmw_user_bo, base);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_base_object_init(tfile,
                                   &user_bo->base,
                                   shareable,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->base.hash.key;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);
        return (vmw_user_bo->base.tfile == tfile ||
                vmw_user_bo->base.shareable) ? 0 : -EPERM;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(base->object_type != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
        return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, res_free,
                                &vmw_stream_func);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;


        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }


        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                  *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}


int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

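        /* Round bpp up to whole bytes; e.g. bpp == 24 gives 3 bytes per pixel. */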
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (vmw_user_bo == NULL)
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (ret != 0) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (ret != 0)
                goto out_no_dmabuf;

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
                                   &vmw_user_bo->base,
                                   false,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0))
                goto out_no_base_object;

        args->handle = vmw_user_bo->base.hash.key;

out_no_base_object:
        ttm_bo_unref(&tmp);
out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}
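
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * from user space, the three dumb-buffer entry points above are reached
 * through the generic DRM ioctls, roughly:
 *
 *      struct drm_mode_create_dumb creq = { .width = w, .height = h,
 *                                           .bpp = 32 };
 *      ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *      struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *      ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *      void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, mreq.offset);
 *
 *      struct drm_mode_destroy_dumb dreq = { .handle = creq.handle };
 *      ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);
 *
 * Error handling is omitted and fd is an open DRM device file
 * descriptor.
 */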

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (new_backup && new_backup != res->backup) {

                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                res->backup = vmw_dmabuf_reference(new_backup);
                lockdep_assert_held(&new_backup->base.resv->lock.base);
                list_add_tail(&res->mob_head, &new_backup->res_list);
        }
        if (new_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @no_backup:      If true, skip allocating a backup buffer even if one
 *                  would normally be needed.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, true);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}
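
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the reserve/validate/unreserve trio is driven from the command
 * submission path, conceptually:
 *
 *      ret = vmw_resource_reserve(res, false);
 *      if (ret)
 *              return ret;
 *      ... reserve and validate the backup buffer with TTM ...
 *      ret = vmw_resource_validate(res);
 *      ... submit commands and fence the buffers ...
 *      vmw_resource_unreserve(res, NULL, 0);
 *
 * The real call sites live in vmwgfx_execbuf.c and add locking and
 * fencing; this only shows the intended call ordering.
 */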

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (likely(!res->func->may_evict))
                return 0;

        val_buf.bo = NULL;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct vmw_fence_obj *old_fence_obj;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                driver->sync_obj_ref(fence);

        spin_lock(&bdev->fence_lock);

        old_fence_obj = bo->sync_obj;
        bo->sync_obj = fence;

        spin_unlock(&bdev->fence_lock);

        if (old_fence_obj)
                vmw_fence_obj_unreference(&old_fence_obj);
}
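
/*
 * Added note (not part of the original file): when @fence is NULL,
 * vmw_execbuf_fence_commands() above emits a new fence into the command
 * stream and returns it already referenced, matching the explicit
 * sync_obj_ref() taken in the other branch; either way the reference is
 * handed over to bo->sync_obj, and the fence previously installed there
 * is unreferenced last.
 */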

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}