/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_resource_priv.h"
struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;
static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
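
/*
 * The embedding is: struct vmw_user_dma_buffer contains a struct
 * vmw_dma_buffer (member "dma"), which contains the TTM buffer object
 * (member "base"). The two helpers above walk this chain with nested
 * container_of() calls, so for a user-visible bo:
 *
 *	struct vmw_user_dma_buffer *user_bo = vmw_user_dma_buffer(bo);
 */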
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, 0);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
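
/*
 * Example (illustrative sketch, not part of the driver): looking up a
 * resource and dropping the reference again. The idr and id values are
 * whatever the caller has at hand; vmw_stream_unref_ioctl() below uses
 * this exact pattern.
 *
 *	res = vmw_resource_lookup(dev_priv, idr, id);
 *	if (unlikely(res == NULL))
 *		return -EINVAL;
 *	(use the resource)
 *	vmw_resource_unreference(&res);
 */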
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by out_surf and out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}
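
/*
 * Example (illustrative sketch): disambiguating a user handle that may
 * name either a surface or a dma buffer. Exactly one of the two out
 * pointers is set on success.
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_dma_buffer *buf = NULL;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *	if (ret == 0 && surf != NULL)
 *		(handle named a surface)
 *	else if (ret == 0)
 *		(handle named a dma buffer)
 */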
/**
 * Buffer management.
 */
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_base_object_kfree(vmw_user_bo, base);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_base_object_init(tfile,
				   &user_bo->base,
				   shareable,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->base.hash.key;

out_no_base_object:
	return ret;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = dma_buf->base.addr_space_offset;
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
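
/*
 * Example (illustrative sketch): the lookup above returns a buffer
 * holding an extra TTM bo reference, so callers pair it with
 * vmw_dmabuf_unreference(); vmw_dumb_map_offset() below is a complete
 * in-file user of this pattern.
 *
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &buf);
 *	if (ret == 0) {
 *		(use buf)
 *		vmw_dmabuf_unreference(&buf);
 *	}
 */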
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
}
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
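
	/*
	 * Worked example: a 1024x768, 32 bpp dumb buffer gets
	 * pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
	 * size = 4096 * 768 = 3145728 bytes; the (bpp + 7) / 8
	 * rounding also covers bpp values that are not a multiple of 8.
	 */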
	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (vmw_user_bo == NULL)
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;

	args->handle = vmw_user_bo->base.hash.key;

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = out_buf->base.addr_space_offset;
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
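
/*
 * Example (illustrative sketch): the -EBUSY contract above means callers
 * retry after freeing device resources; vmw_resource_validate() below
 * implements the canonical loop:
 *
 *	do {
 *		ret = vmw_resource_do_validate(res, &val_buf);
 *		if (likely(ret != -EBUSY))
 *			break;
 *		(evict an LRU resource of the same type and retry)
 *	} while (1);
 */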
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
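
/*
 * Example (illustrative sketch, error handling omitted): the
 * reserve / validate / unreserve cycle a caller performs around
 * command submission. Passing a NULL new_backup keeps the current
 * backup buffer.
 *
 *	ret = vmw_resource_reserve(res, false);
 *	ret = vmw_resource_validate(res);
 *	(submit device commands referencing res)
 *	vmw_resource_unreserve(res, NULL, 0);
 */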
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
int vmw_resource_check_buffer(struct vmw_resource *res,
			      bool interruptible,
			      struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(&val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(&val_list);
	ttm_bo_unref(&val_buf->bo);
}
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 */
int vmw_resource_do_evict(struct vmw_resource *res)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, true, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			return ret;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device id entries "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);
		vmw_resource_do_evict(evict_res);
		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	if (old_fence_obj != NULL)
		vmw_fence_obj_unreference(&old_fence_obj);
}
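
/*
 * Example (illustrative sketch): since this function does not unreserve,
 * a caller typically fences and then unreserves explicitly. A NULL fence
 * asks the function to insert one into the command stream itself.
 *
 *	vmw_fence_single_bo(bo, fence);
 *	ttm_bo_unreserve(bo);
 */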
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * For now does nothing.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
}
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);
		vmw_resource_do_evict(evict_res);
		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}