// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"

/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
 * @bo: ttm buffer object to be checked
 *
 * Uses the destroy function associated with the object to determine if this
 * is a &vmw_buffer_object.
 *
 * Returns:
 * true if the object is of &vmw_buffer_object type, false if not.
 */
static bool bo_is_vmw(struct ttm_buffer_object *bo)
{
        return bo->destroy == &vmw_bo_bo_free ||
               bo->destroy == &vmw_gem_destroy;
}

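/*
 * Note: the type check above relies on every &vmw_buffer_object having been
 * created with either vmw_bo_bo_free or vmw_gem_destroy as its TTM destroy
 * callback; a TTM object with any other destroy callback is not embedded in
 * a &vmw_buffer_object and must not be converted to one.
 */
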
/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        ret = ttm_bo_validate(bo, placement, &ctx);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        return ret;
}

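/*
 * Illustrative usage sketch (not part of this file): callers typically pin a
 * buffer for the duration of a device operation and balance the pin with
 * vmw_bo_unpin(), e.g.:
 *
 *      ret = vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement, true);
 *      if (ret)
 *              return ret;
 *      // ... submit work that requires the buffer to stay in place ...
 *      vmw_bo_unpin(dev_priv, buf, false);
 */
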
/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;

        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}

/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;

        place = vmw_vram_placement.placement[0];
        place.lpfn = PFN_UP(bo->resource->size);
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
        placement.busy_placement = &place;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->resource->mem_type == TTM_PL_VRAM &&
            bo->resource->start < PFN_UP(bo->resource->size) &&
            bo->resource->start > 0 &&
            buf->base.pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }

        ret = ttm_bo_validate(bo, &placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->resource->start != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);
err:
        return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->resource->mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->resource->start << PAGE_SHIFT;
        } else {
                ptr->gmrId = bo->resource->start;
                ptr->offset = 0;
        }
}

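/*
 * For VRAM placements the guest pointer above addresses the special
 * framebuffer GMR (SVGA_GMR_FRAMEBUFFER) with a byte offset; for all other
 * placements the resource start is itself used as the GMR id and the offset
 * is zero.
 */
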
/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
        uint32_t old_mem_type = bo->resource->mem_type;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        if (pin == !!bo->pin_count)
                return;

        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.mem_type = bo->resource->mem_type;
        pl.flags = bo->resource->placement;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

        if (pin)
                ttm_bo_pin(bo);
        else
                ttm_bo_unpin(bo);
}

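/*
 * The ttm_bo_validate() call in vmw_bo_pin_reserved() re-validates the buffer
 * against its current placement only; the BUG_ON() verifies that the memory
 * type did not change before the pin count is adjusted with
 * ttm_bo_pin()/ttm_bo_unpin().
 */
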
/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
        struct ttm_buffer_object *bo = &vbo->base;
        bool not_used;
        void *virtual;
        int ret;

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}

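/*
 * Illustrative usage sketch (not part of this file): with the buffer pinned
 * or reserved, the cached map behaves like a plain kernel pointer:
 *
 *      u32 *cmd = vmw_bo_map_and_cache(vbo);
 *      if (!cmd)
 *              return -ENOMEM;
 *      cmd[0] = value;
 *      // No explicit unmap is required here; the cached map is torn down on
 *      // buffer move, swapout or destruction (see vmw_bo_unmap()).
 */
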
/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
        if (vbo->map.bo == NULL)
                return;

        ttm_bo_kunmap(&vbo->map);
}

/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        WARN_ON(vmw_bo->dirty);
        WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
        vmw_bo_unmap(vmw_bo);
        drm_gem_object_release(&bo->base);
        kfree(vmw_bo);
}

/* default destructor */
static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
{
        kfree(bo);
}

/**
 * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
 *
 * @dev_priv: Pointer to the device private struct
 * @size: size of the BO we need
 * @placement: where to put it
 * @p_bo: resulting BO
 *
 * Creates and pins a simple BO for in-kernel use.
 */
int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
                         struct ttm_placement *placement,
                         struct ttm_buffer_object **p_bo)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        struct ttm_buffer_object *bo;
        struct drm_device *vdev = &dev_priv->drm;
        int ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (unlikely(!bo))
                return -ENOMEM;

        size = ALIGN(size, PAGE_SIZE);
        drm_gem_private_object_init(vdev, &bo->base, size);

        ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
                                   placement, 0, &ctx, NULL, NULL,
                                   vmw_bo_default_destroy);
        if (unlikely(ret))
                goto error_free;

        ttm_bo_pin(bo);
        ttm_bo_unreserve(bo);
        *p_bo = bo;

        return 0;

error_free:
        kfree(bo);
        return ret;
}

int vmw_bo_create(struct vmw_private *vmw,
                  size_t size, struct ttm_placement *placement,
                  bool interruptible, bool pin,
                  void (*bo_free)(struct ttm_buffer_object *bo),
                  struct vmw_buffer_object **p_bo)
{
        int ret;

        *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
        if (unlikely(!*p_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_bo_init(vmw, *p_bo, size,
                          placement, interruptible, pin,
                          bo_free);
        if (unlikely(ret != 0))
                goto out_error;

        return ret;
out_error:
        *p_bo = NULL;
        return ret;
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @pin: If the BO should be created pinned at a fixed location.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
                bool interruptible, bool pin,
                void (*bo_free)(struct ttm_buffer_object *bo))
{
        struct ttm_operation_ctx ctx = {
                .interruptible = interruptible,
                .no_wait_gpu = false
        };
        struct ttm_device *bdev = &dev_priv->bdev;
        struct drm_device *vdev = &dev_priv->drm;
        int ret;

        WARN_ON_ONCE(!bo_free);
        memset(vmw_bo, 0, sizeof(*vmw_bo));
        BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
        vmw_bo->base.priority = 3;
        vmw_bo->res_tree = RB_ROOT;

        size = ALIGN(size, PAGE_SIZE);
        drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);

        ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
                                   placement, 0, &ctx, NULL, NULL, bo_free);
        if (unlikely(ret))
                return ret;

        if (pin)
                ttm_bo_pin(&vmw_bo->base);
        ttm_bo_unreserve(&vmw_bo->base);

        return 0;
}

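/*
 * Illustrative only: most in-kernel users allocate through vmw_bo_create(),
 * which kmallocs the &vmw_buffer_object and then calls vmw_bo_init(), e.g.
 * (placement and flags below chosen purely for illustration):
 *
 *      struct vmw_buffer_object *vbo;
 *      int ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
 *                              true, false, vmw_bo_bo_free, &vbo);
 */
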
/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file handle it was
 * performed through is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
                                    uint32_t flags)
{
        bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
        struct ttm_buffer_object *bo = &vmw_bo->base;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                long lret;

                lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
                                             true, nonblock ? 0 :
                                             MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_reserve(bo, true, nonblock, NULL);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_wait(bo, true, nonblock);
        if (likely(ret == 0))
                atomic_inc(&vmw_bo->cpu_writers);

        ttm_bo_unreserve(bo);
        if (unlikely(ret != 0))
                return ret;

        return ret;
}

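/*
 * Note on the two paths above: with drm_vmw_synccpu_allow_cs the grab only
 * waits for GPU work already queued against the buffer (via
 * dma_resv_wait_timeout()) and does not block later command submission.
 * The default path instead bumps cpu_writers, which the command submission
 * code checks in order to keep the buffer off the GPU while user-space
 * accesses it.
 */
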
/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
                                       uint32_t handle,
                                       uint32_t flags)
{
        struct vmw_buffer_object *vmw_bo;
        int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

        if (!ret) {
                if (!(flags & drm_vmw_synccpu_allow_cs)) {
                        atomic_dec(&vmw_bo->cpu_writers);
                }
                ttm_bo_put(&vmw_bo->base);
        }

        return ret;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_buffer_object *vbo;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
                if (unlikely(ret != 0))
                        return ret;

                ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
                vmw_bo_unreference(&vbo);
                if (unlikely(ret != 0)) {
                        if (ret == -ERESTARTSYS || ret == -EBUSY)
                                return -EBUSY;
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(file_priv,
                                                  arg->handle,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
                (struct drm_vmw_unref_dmabuf_arg *)data;

        drm_gem_handle_delete(file_priv, arg->handle);
        return 0;
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle.
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * Return: Zero on success, negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted.
 */
int vmw_user_bo_lookup(struct drm_file *filp,
                       uint32_t handle,
                       struct vmw_buffer_object **out)
{
        struct drm_gem_object *gobj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (!gobj) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        *out = gem_to_vmw_bo(gobj);
        ttm_bo_get(&(*out)->base);
        drm_gem_object_put(gobj);

        return 0;
}

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
 * scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
{
        struct vmw_buffer_object *vmw_bo;
        struct ttm_buffer_object *bo;
        struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);

        if (!gobj) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-ESRCH);
        }
        vmw_bo = gem_to_vmw_bo(gobj);
        bo = ttm_bo_get_unless_zero(&vmw_bo->base);
        vmw_bo = vmw_buffer_object(bo);
        drm_gem_object_put(gobj);

        return vmw_bo;
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 *         insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_device *bdev = bo->bdev;
        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);
        int ret;

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                dma_fence_get(&fence->base);

        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (!ret)
                dma_resv_add_fence(bo->base.resv, &fence->base,
                                   DMA_RESV_USAGE_KERNEL);
        else
                /* Last resort fallback when we are OOM */
                dma_fence_wait(&fence->base, false);
        dma_fence_put(&fence->base);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_buffer_object *vbo;
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        int ret;

        switch (cpp) {
        case 1: /* DRM_FORMAT_C8 */
        case 2: /* DRM_FORMAT_RGB565 */
        case 4: /* DRM_FORMAT_XRGB8888 */
                break;
        default:
                /*
                 * Dumb buffers don't allow anything else.
                 * This is tested via IGT's dumb_buffers.
                 */
                return -EINVAL;
        }

        args->pitch = args->width * cpp;
        args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

        ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
                                                args->size, &args->handle,
                                                &vbo);

        return ret;
}

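/*
 * Worked example (illustrative): a 1024x768 XRGB8888 dumb buffer has cpp = 4,
 * so pitch = 1024 * 4 = 4096 bytes and size = ALIGN(4096 * 768, PAGE_SIZE) =
 * 3145728 bytes, which is already page-aligned with 4 KiB pages.
 */
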
/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (!bo_is_vmw(bo))
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(vmw_buffer_object(bo));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_resource *mem)
{
        struct vmw_buffer_object *vbo;

        /* Make sure @bo is embedded in a struct vmw_buffer_object. */
        if (!bo_is_vmw(bo))
                return;

        vbo = container_of(bo, struct vmw_buffer_object, base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure we
         * read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}