/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
        struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
        .res_type = vmw_res_stream,
        .needs_backup = false,
        .may_evict = false,
        .type_name = "video streams",
        .backup_placement = NULL,
        .create = NULL,
        .destroy = NULL,
        .bind = NULL,
        .unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

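/*
 * Example (illustrative only, not part of the driver): callers pair the
 * reference/unreference helpers around any use of a looked-up resource.
 * Note that vmw_resource_unreference() NULLs the caller's pointer:
 *
 *      struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *      ... use tmp ...
 *      vmw_resource_unreference(&tmp);    -- tmp is NULL afterwards
 */
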
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        write_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        res->avail = false;
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, false, 0);
                if (!list_empty(&res->mob_head) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                list_del_init(&res->mob_head);
                ttm_bo_unreserve(bo);
                vmw_dmabuf_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);

        if (id != -1)
                idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        write_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        write_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}

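/*
 * Sketch (illustrative only): the idr_preload()/idr_alloc() pattern used
 * above lets the id allocation itself run under a spinlock. Preloading
 * happens in sleepable context and fills a per-cpu cache that the
 * GFP_NOWAIT allocation inside the lock then draws from:
 *
 *      idr_preload(GFP_KERNEL);        -- may sleep, fills per-cpu cache
 *      spin_lock(&lock);
 *      id = idr_alloc(&idr, ptr, 1, 0, GFP_NOWAIT);  -- atomic context ok
 *      spin_unlock(&lock);
 *      idr_preload_end();
 */
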
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res: The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 * the first validation.
 * @res_free: Resource destructor.
 * @func: Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->avail = false;
        res->dev_priv = dev_priv;
        res->func = func;
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->mob_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
                           void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

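/*
 * Example (illustrative only): looking up a stream resource by id. The
 * returned resource is refcounted, so every successful lookup must be
 * balanced by an unreference:
 *
 *      res = vmw_resource_lookup(dev_priv,
 *                                &dev_priv->res_idr[vmw_res_stream], id);
 *      if (res != NULL) {
 *              ... use res ...
 *              vmw_resource_unreference(&res);
 *      }
 */
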
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv: Pointer to a device private struct
 * @tfile: Pointer to a struct ttm_object_file identifying the caller
 * @handle: The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res: On successful return the location pointed to will contain
 * a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);

        read_lock(&dev_priv->resource_lock);
        if (!res->avail || res->res_free != converter->res_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

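/*
 * Example (illustrative only): an ioctl handler resolving a user-space
 * surface handle through the converter the surface code registers:
 *
 *      struct vmw_resource *res;
 *      int ret;
 *
 *      ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                            user_surface_converter, &res);
 *      if (unlikely(ret != 0))
 *              return ret;     -- bad handle or wrong resource type
 *      ... use res ...
 *      vmw_resource_unreference(&res);
 */
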
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by out_surf and out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_dma_buffer **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
        return ret;
}

/*
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
                                  bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_dma_buffer));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

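/*
 * Worked example (illustrative, assuming 4 KiB pages and a 100 KiB
 * request): PAGE_ALIGN(100 KiB) gives 25 pages, so the page array
 * accounts for ttm_round_pot(25 * sizeof(void *)) bytes, and with
 * coherent DMA mapping another ttm_round_pot(25 * sizeof(dma_addr_t))
 * bytes is added on top of the (user) struct size.
 */
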
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

        ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_dmabuf_destroy);

        BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

        acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          (user) ? ttm_bo_type_device :
                          ttm_bo_type_kernel, placement,
                          0, interruptible,
                          NULL, acc_size, NULL, bo_free);
        return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                          struct ttm_object_file *tfile,
                          uint32_t size,
                          bool shareable,
                          uint32_t *handle,
                          struct vmw_dma_buffer **p_dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;
        struct ttm_buffer_object *tmp;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
                              (dev_priv->has_mob) ?
                              &vmw_sys_placement :
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                return ret;

        tmp = ttm_bo_reference(&user_bo->dma.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }

        *p_dma_buf = &user_bo->dma;
        *handle = user_bo->prime.base.hash.key;

out_no_base_object:
        return ret;
}

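/*
 * Example (illustrative only): allocating a non-shareable user buffer
 * and dropping the local reference once the handle has been handed to
 * user space; the base object keeps the buffer alive:
 *
 *      uint32_t handle;
 *      struct vmw_dma_buffer *dma_buf;
 *      int ret;
 *
 *      ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *                                  &handle, &dma_buf);
 *      if (ret == 0)
 *              vmw_dmabuf_unreference(&dma_buf);
 */
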
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                                  struct ttm_object_file *tfile)
{
        struct vmw_user_dma_buffer *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_dma_buffer(bo);
        return (vmw_user_bo->prime.base.tfile == tfile ||
                vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}

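/*
 * In other words (illustrative summary): access is granted either to
 * the file that created the buffer or, for shareable buffers, to any
 * file:
 *
 *      owner:      vmw_user_bo->prime.base.tfile == tfile  ->  0
 *      shareable:  vmw_user_bo->prime.base.shareable       ->  0
 *      otherwise:                                          ->  -EPERM
 */
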
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_dma_buffer *dma_buf;
        uint32_t handle;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    req->size, false, &handle, &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
                                   prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

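/*
 * Example (illustrative only): translating a user handle to a buffer
 * pointer. The lookup takes a reference on the underlying TTM buffer
 * object, which the caller must drop with vmw_dmabuf_unreference():
 *
 *      struct vmw_dma_buffer *buf = NULL;
 *
 *      if (vmw_user_dmabuf_lookup(tfile, handle, &buf) == 0) {
 *              ... use buf ...
 *              vmw_dmabuf_unreference(&buf);
 *      }
 */
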
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                              struct vmw_dma_buffer *dma_buf)
{
        struct vmw_user_dma_buffer *user_bo;

        if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
                return -EINVAL;

        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, false, res_free,
                                &vmw_stream_func);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(stream, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
        int ret = 0;


        res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of streams anyway.
         */

        if (unlikely(vmw_user_stream_size == 0))
                vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_stream_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for stream"
                                  " creation.\n");
                goto out_unlock;
        }


        stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        if (unlikely(stream == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_stream_size);
                ret = -ENOMEM;
                goto out_unlock;
        }

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                goto out_unlock;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
                                  *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct vmw_dma_buffer *dma_buf;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                    args->size, false, &args->handle,
                                    &dma_buf);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

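/*
 * Worked example (illustrative): for a 1024x768 dumb buffer at 32 bpp,
 * pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
 * size  = 4096 * 768 = 3145728 bytes (3 MiB); the (bpp + 7) / 8
 * rounding also handles depths that are not a multiple of 8.
 */
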
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *out_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_dmabuf_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_dma_buffer *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(backup == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        res->backup = backup;

out_no_dmabuf:
        return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 * @val_buf: Information about a buffer possibly
 * containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && list_empty(&res->mob_head) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        list_add_tail(&res->mob_head, &res->backup->res_list);
        }

        /*
         * Only do this on write operations, and move to
         * vmw_resource_unreserve if it can be called after
         * backup buffers have been unreserved. Otherwise
         * sort out locking.
         */
        res->res_dirty = true;

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res: Pointer to the struct vmw_resource to unreserve.
 * @new_backup: Pointer to new backup buffer if command submission
 * switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            struct vmw_dma_buffer *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (new_backup && new_backup != res->backup) {

                if (res->backup) {
                        lockdep_assert_held(&res->backup->base.resv->lock.base);
                        list_del_init(&res->mob_head);
                        vmw_dmabuf_unreference(&res->backup);
                }

                res->backup = vmw_dmabuf_reference(new_backup);
                lockdep_assert_held(&new_backup->base.resv->lock.base);
                list_add_tail(&res->mob_head, &new_backup->res_list);
        }
        if (new_backup)
                res->backup_offset = new_backup_offset;

        if (!res->func->may_evict || res->id == -1)
                return;

        write_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        write_unlock(&dev_priv->resource_lock);
}

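/*
 * Example (illustrative only): after command submission has switched a
 * resource to a newly validated backup buffer, the caller unreserves
 * with the new buffer and offset; with no backup switch it passes
 * NULL/0 and the resource simply goes back on the LRU list:
 *
 *      vmw_resource_unreserve(res, new_backup, new_backup_offset);
 *      vmw_resource_unreserve(res, NULL, 0);
 */
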
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 * for a resource and in that case, allocate
 * one, reserve and validate it.
 *
 * @res: The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 * performed while interruptible.
 * @val_buf: On successful return contains data about the
 * reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(NULL, &val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && list_empty(&res->mob_head))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              true, false);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
                vmw_dmabuf_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res: The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        write_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        write_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, true);
                if (unlikely(ret != 0))
                        return ret;
        }

        return 0;
}

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 * backup buffer
 *
 * @val_buf: Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 * to a backup buffer.
 *
 * @res: The resource to evict.
 * @interruptible: Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || !list_empty(&res->mob_head)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                list_del_init(&res->mob_head);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(&val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 * to the device.
 *
 * @res: The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (likely(!res->func->may_evict))
                return 0;

        val_buf.bo = NULL;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf);
                if (likely(ret != -EBUSY))
                        break;

                write_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        write_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, true);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                list_del_init(&res->mob_head);
                vmw_dmabuf_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}

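/*
 * Sketch (illustrative only) of the reserve/validate/unreserve cycle a
 * caller such as the command submission code goes through; any backup
 * buffer must be reserved by the caller across the validate call:
 *
 *      ret = vmw_resource_reserve(res, false); -- off LRU, backup alloc'd
 *      if (ret == 0) {
 *              ... reserve and validate res->backup ...
 *              ret = vmw_resource_validate(res); -- create/bind, may evict
 *              ... fence and unreserve buffers ...
 *              vmw_resource_unreserve(res, NULL, 0);   -- back on LRU
 *      }
 */
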
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        struct vmw_fence_obj *old_fence_obj;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                driver->sync_obj_ref(fence);

        spin_lock(&bdev->fence_lock);

        old_fence_obj = bo->sync_obj;
        bo->sync_obj = fence;

        spin_unlock(&bdev->fence_lock);

        if (old_fence_obj)
                vmw_fence_obj_unreference(&old_fence_obj);
}

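/*
 * Example (illustrative only): fencing a reserved buffer after GPU work
 * has been queued. Passing a NULL fence makes the function insert one
 * itself; unreserving remains the caller's job:
 *
 *      ttm_bo_reserve(bo, false, false, false, 0);
 *      ... queue GPU commands touching bo ...
 *      vmw_fence_single_bo(bo, NULL);  -- inserts and attaches a fence
 *      ttm_bo_unreserve(bo);
 */
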
/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
                              struct ttm_mem_reg *mem)
{
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv: Pointer to a device private struct
 * @type: The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;

        do {
                write_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                write_unlock(&dev_priv->resource_lock);

                ret = vmw_resource_do_evict(evict_res, false);
                if (unlikely(ret != 0)) {
                        write_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        write_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv: Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}
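
/*
 * Example (illustrative only): a suspend/hibernation path would flush
 * pending work and then clean out all evictable resources, leaving the
 * OTables clean:
 *
 *      vmw_resource_evict_all(dev_priv);
 */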