if (unlikely(ret != 0))
goto err;
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_VRAM);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
out_unreserve:
if (!ret)
{
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement placement;
- struct ttm_place place;
int ret = 0;
- place = vmw_vram_placement.placement[0];
- place.lpfn = PFN_UP(bo->resource->size);
- placement.num_placement = 1;
- placement.placement = &place;
- placement.num_busy_placement = 1;
- placement.busy_placement = &place;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
bo->resource->start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
- (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_SYS,
+ VMW_BO_DOMAIN_SYS);
+ (void)ttm_bo_validate(bo, &buf->placement, &ctx);
}
- ret = ttm_bo_validate(bo, &placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_VRAM);
+ buf->places[0].lpfn = PFN_UP(bo->resource->size);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->resource->start != 0);
}
int vmw_bo_create(struct vmw_private *vmw,
- size_t size, struct ttm_placement *placement,
+ size_t size, u32 domain, u32 busy_domain,
bool interruptible, bool pin,
struct vmw_bo **p_bo)
{
}
ret = vmw_bo_init(vmw, *p_bo, size,
- placement, interruptible, pin);
+ domain, busy_domain,
+ interruptible, pin);
if (unlikely(ret != 0))
goto out_error;
* @dev_priv: Pointer to the device private struct
* @vmw_bo: Pointer to the struct vmw_bo to initialize.
* @size: Buffer object size in bytes.
- * @placement: Initial placement.
+ * @domain: Domain to put the bo in.
+ * @busy_domain: Domain to put the bo if busy.
* @interruptible: Whether waits should be performed interruptible.
* @pin: If the BO should be created pinned at a fixed location.
* Returns: Zero on success, negative error code on error.
*/
int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_bo *vmw_bo,
- size_t size, struct ttm_placement *placement,
+ size_t size,
+ u32 domain,
+ u32 busy_domain,
bool interruptible, bool pin)
{
struct ttm_operation_ctx ctx = {
size = ALIGN(size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
+ vmw_bo_placement_set(vmw_bo, domain, busy_domain);
ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
- placement, 0, &ctx, NULL, NULL, vmw_bo_free);
+ &vmw_bo->placement, 0, &ctx, NULL, NULL, vmw_bo_free);
if (unlikely(ret)) {
return ret;
}
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
+
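+/**
+ * set_placement_list - Fill a ttm_place array from a domain bitmask
+ * @pl: The array of ttm_place structs to fill in
+ * @domain: Bitmask of VMW_BO_DOMAIN_* flags
+ *
+ * Return: The number of placements written to @pl.
+ */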
+static u32
+set_placement_list(struct ttm_place *pl, u32 domain)
+{
+ u32 n = 0;
+
+ /*
+ * The placements are ordered according to our preferences
+ */
+ if (domain & VMW_BO_DOMAIN_MOB) {
+ pl[n].mem_type = VMW_PL_MOB;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_GMR) {
+ pl[n].mem_type = VMW_PL_GMR;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_VRAM) {
+ pl[n].mem_type = TTM_PL_VRAM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
+ pl[n].mem_type = VMW_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_SYS) {
+ pl[n].mem_type = TTM_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+
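+ /*
+ * Requesting no domain is a caller bug; warn and fall back to system
+ * memory so the bo still has a valid placement.
+ */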
+ WARN_ON(!n);
+ if (!n) {
+ pl[n].mem_type = TTM_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ return n;
+}
+
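+/**
+ * vmw_bo_placement_set - Set the placements of a vmw bo
+ * @bo: The buffer object to set placements on
+ * @domain: Bitmask of VMW_BO_DOMAIN_* flags for the preferred placements
+ * @busy_domain: Bitmask of VMW_BO_DOMAIN_* flags to fall back to when busy
+ */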
+void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
+{
+ struct ttm_device *bdev = bo->base.bdev;
+ struct vmw_private *vmw =
+ container_of(bdev, struct vmw_private, bdev);
+ struct ttm_placement *pl = &bo->placement;
+ bool mem_compatible = false;
+ u32 i;
+
+ pl->placement = bo->places;
+ pl->num_placement = set_placement_list(bo->places, domain);
+
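+ /*
+ * With DRM driver debugging enabled, warn about transitions that will
+ * force the bo out of its current (non-system) memory type.
+ */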
+ if (drm_debug_enabled(DRM_UT_DRIVER) && bo->base.resource) {
+ for (i = 0; i < pl->num_placement; ++i) {
+ if (bo->base.resource->mem_type == TTM_PL_SYSTEM ||
+ bo->base.resource->mem_type == pl->placement[i].mem_type)
+ mem_compatible = true;
+ }
+ if (!mem_compatible)
+ drm_warn(&vmw->drm,
+ "%s: Incompatible transition from "
+ "bo->base.resource->mem_type = %u to domain = %u\n",
+ __func__, bo->base.resource->mem_type, domain);
+ }
+
+ pl->busy_placement = bo->busy_places;
+ pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
+}
+
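+/**
+ * vmw_bo_placement_set_default_accelerated - Set the bo to its preferred
+ * accelerated (device backed) placement
+ * @bo: The buffer object to set placements on
+ */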
+void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
+{
+ struct ttm_device *bdev = bo->base.bdev;
+ struct vmw_private *vmw =
+ container_of(bdev, struct vmw_private, bdev);
+ u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;
+
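+ /* MOB capable devices keep accelerated bos in MOBs; otherwise use GMR or VRAM. */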
+ if (vmw->has_mob)
+ domain = VMW_BO_DOMAIN_MOB;
+
+ vmw_bo_placement_set(bo, domain, domain);
+}
#include "device_include/svga_reg.h"
#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
#include <linux/rbtree_types.h>
#include <linux/types.h>
struct vmw_private;
struct vmw_resource;
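+/**
+ * enum vmw_bo_domain - Bitmask of memory domains a vmw_bo can be placed in
+ * @VMW_BO_DOMAIN_SYS: Plain system memory (TTM_PL_SYSTEM)
+ * @VMW_BO_DOMAIN_WAITABLE_SYS: Driver-private system placement (VMW_PL_SYSTEM)
+ * @VMW_BO_DOMAIN_VRAM: Device VRAM (TTM_PL_VRAM)
+ * @VMW_BO_DOMAIN_GMR: Guest memory region (VMW_PL_GMR)
+ * @VMW_BO_DOMAIN_MOB: Guest backed memory object (VMW_PL_MOB)
+ */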
+enum vmw_bo_domain {
+ VMW_BO_DOMAIN_SYS = BIT(0),
+ VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1),
+ VMW_BO_DOMAIN_VRAM = BIT(2),
+ VMW_BO_DOMAIN_GMR = BIT(3),
+ VMW_BO_DOMAIN_MOB = BIT(4),
+};
+
/**
* struct vmw_bo - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object
*/
struct vmw_bo {
struct ttm_buffer_object base;
+
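+ /* Current placements, filled in by vmw_bo_placement_set() */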
+ struct ttm_placement placement;
+ struct ttm_place places[5];
+ struct ttm_place busy_places[5];
+
struct rb_root res_tree;
atomic_t cpu_writers;
struct vmw_bo_dirty *dirty;
};
+void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
+void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);
+
int vmw_bo_create_kernel(struct vmw_private *dev_priv,
unsigned long size,
struct ttm_placement *placement,
struct ttm_buffer_object **p_bo);
int vmw_bo_create(struct vmw_private *dev_priv,
- size_t size, struct ttm_placement *placement,
+ size_t size,
+ u32 domain,
+ u32 busy_domain,
bool interruptible, bool pin,
struct vmw_bo **p_bo);
int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_bo *vmw_bo,
- size_t size, struct ttm_placement *placement,
+ size_t size,
+ u32 domain,
+ u32 busy_domain,
bool interruptible, bool pin);
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
.needs_backup = false,
.may_evict = false,
.type_name = "legacy contexts",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = NULL,
.destroy = NULL,
.bind = NULL,
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed contexts",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_context_create,
.destroy = vmw_gb_context_destroy,
.bind = vmw_gb_context_bind,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx contexts",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_context_create,
.destroy = vmw_dx_context_destroy,
.bind = vmw_dx_context_bind,
.prio = 3,
.dirty_prio = 3,
.type_name = "context guest backed object tables",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_cotable_create,
.destroy = vmw_cotable_destroy,
.bind = vmw_cotable_bind,
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
+ ret = vmw_bo_create(dev_priv, new_size,
+ VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB,
true, true, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
}
/* Unpin new buffer, and switch backup buffers. */
- ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_MOB,
+ VMW_BO_DOMAIN_MOB);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed validating new COTable backup buffer.\n");
goto out_wait;
* user of the bo currently.
*/
ret = vmw_bo_create(dev_priv, PAGE_SIZE,
- &vmw_sys_placement, false, true, &vbo);
+ VMW_BO_DOMAIN_SYS, VMW_BO_DOMAIN_SYS,
+ false, true, &vbo);
if (unlikely(ret != 0))
return ret;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
struct vmw_bo *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
- if (dx_query_mob)
+ if (dx_query_mob) {
+ vmw_bo_placement_set(dx_query_mob,
+ VMW_BO_DOMAIN_MOB,
+ VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx,
- dx_query_mob, true, false);
+ dx_query_mob);
+ }
}
mutex_unlock(&dev_priv->binding_mutex);
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
+ vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
- sw_context->cur_query_bo,
- dev_priv->has_mob, false);
+ sw_context->cur_query_bo);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
+ vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
- dev_priv->dummy_query_bo,
- dev_priv->has_mob, false);
+ dev_priv->dummy_query_bo);
if (unlikely(ret != 0))
return ret;
}
drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
- ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0))
return ret;
drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
- ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
ttm_bo_put(&vmw_bo->base);
if (unlikely(ret != 0))
return ret;
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
- false);
+ vmw_bo_placement_set(dev_priv->pinned_bo,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
if (ret)
goto out_no_reserve;
- ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
- false);
+ vmw_bo_placement_set(dev_priv->dummy_query_bo,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
if (ret)
goto out_no_reserve;
int ret;
ret = vmw_bo_create(dev_priv, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement,
+ (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_SYS,
true, false, p_vbo);
(*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
user_fence_rep, vclips, num_clips,
NULL);
case vmw_du_screen_target:
- return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
- user_fence_rep, NULL, vclips, num_clips,
- 1, false, true, NULL);
+ return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
+ user_fence_rep, NULL, vclips, num_clips,
+ 1, NULL);
default:
WARN_ONCE(true,
"Readback called with invalid display system.\n");
struct vmw_framebuffer_bo *vfbbo =
container_of(update->vfb, typeof(*vfbbo), base);
- ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
- update->cpu_blit);
+ /*
+ * For screen targets we want a mappable bo; for everything else we want
+ * an accelerated, i.e. host backed (VRAM or GMR), bo. If the display unit
+ * is not a screen target then MOBs shouldn't be available.
+ */
+ if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
+ vmw_bo_placement_set(vfbbo->buffer,
+ VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
+ VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
+ } else {
+ WARN_ON(update->dev_priv->has_mob);
+ vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
+ }
+ ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
} else {
struct vmw_framebuffer_surface *vfbs =
container_of(update->vfb, typeof(*vfbs), base);
struct vmw_framebuffer *vfb;
struct vmw_fence_obj **out_fence;
struct mutex *mutex;
- bool cpu_blit;
bool intr;
};
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc);
-int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- uint32_t num_clips,
- int increment,
- bool to_surface,
- bool interruptible,
- struct drm_crtc *crtc);
+int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ struct drm_crtc *crtc);
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
}
ret = vmw_bo_create(res->dev_priv, res->backup_size,
- res->func->backup_placement,
+ res->func->domain, res->func->busy_domain,
interruptible, false, &backup);
if (unlikely(ret != 0))
goto out_no_bo;
return 0;
backup_dirty = res->backup_dirty;
+ vmw_bo_placement_set(res->backup, res->func->domain,
+ res->func->busy_domain);
ret = ttm_bo_validate(&res->backup->base,
- res->func->backup_placement,
+ &res->backup->placement,
&ctx);
if (unlikely(ret != 0))
if (ret)
goto out_no_validate;
if (!vbo->base.pin_count) {
+ vmw_bo_placement_set(vbo,
+ res->func->domain,
+ res->func->busy_domain);
ret = ttm_bo_validate
(&vbo->base,
- res->func->backup_placement,
+ &vbo->placement,
&ctx);
if (ret) {
ttm_bo_unreserve(&vbo->base);
enum vmw_res_type res_type;
bool needs_backup;
const char *type_name;
- struct ttm_placement *backup_placement;
+ u32 domain;
+ u32 busy_domain;
bool may_evict;
u32 prio;
u32 dirty_prio;
*/
vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_create(dev_priv, size,
- &vmw_vram_placement,
+ VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_VRAM,
false, true, &vps->bo);
vmw_overlay_resume_all(dev_priv);
if (ret) {
bo_update.base.vfb = vfb;
bo_update.base.out_fence = out_fence;
bo_update.base.mutex = NULL;
- bo_update.base.cpu_blit = false;
bo_update.base.intr = true;
bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
srf_update.base.vfb = vfb;
srf_update.base.out_fence = out_fence;
srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
- srf_update.base.cpu_blit = false;
srf_update.base.intr = true;
srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
+ vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
+ vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed shaders",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_shader_create,
.destroy = vmw_gb_shader_destroy,
.bind = vmw_gb_shader_bind,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx shaders",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_shader_create,
/*
* The destroy callback is only called with a committed resource on
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
+ ret = vmw_bo_create(dev_priv, size,
+ VMW_BO_DOMAIN_SYS,
+ VMW_BO_DOMAIN_SYS,
true, true, &buf);
if (unlikely(ret != 0))
goto out;
WARN_ON(is_iomem);
ttm_bo_kunmap(&map);
- ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_SYS,
+ VMW_BO_DOMAIN_SYS);
+ ret = ttm_bo_validate(&buf->base, &buf->placement, &ctx);
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
.needs_backup = false,
.may_evict = false,
.type_name = "DX view",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = vmw_view_create,
.commit_notify = vmw_view_commit_notify,
};
*/
struct vmw_stdu_dirty {
struct vmw_kms_dirty base;
- SVGA3dTransferType transfer;
s32 left, right, top, bottom;
s32 fb_left, fb_top;
u32 pitch;
* Screen Target Display Unit CRTC Functions
*****************************************************************************/
-static bool vmw_stdu_use_cpu_blit(const struct vmw_private *vmw)
-{
- return !(vmw->capabilities & SVGA_CAP_3D) || vmw->vram_size < (32 * 1024 * 1024);
-}
-
-
/**
* vmw_stdu_crtc_destroy - cleans up the STDU
*
}
/**
- * vmw_stdu_bo_clip - Callback to encode a suface DMA command cliprect
- *
- * @dirty: The closure structure.
- *
- * Encodes a surface DMA command cliprect and updates the bounding box
- * for the DMA.
- */
-static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
-{
- struct vmw_stdu_dirty *ddirty =
- container_of(dirty, struct vmw_stdu_dirty, base);
- struct vmw_stdu_dma *cmd = dirty->cmd;
- struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
-
- blit += dirty->num_hits;
- blit->srcx = dirty->fb_x;
- blit->srcy = dirty->fb_y;
- blit->x = dirty->unit_x1;
- blit->y = dirty->unit_y1;
- blit->d = 1;
- blit->w = dirty->unit_x2 - dirty->unit_x1;
- blit->h = dirty->unit_y2 - dirty->unit_y1;
- dirty->num_hits++;
-
- if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
- return;
-
- /* Destination bounding box */
- ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
- ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
- ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
- ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
-}
-
-/**
- * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
- *
- * @dirty: The closure structure.
- *
- * Fills in the missing fields in a DMA command, and optionally encodes
- * a screen target update command, depending on transfer direction.
- */
-static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
-{
- struct vmw_stdu_dirty *ddirty =
- container_of(dirty, struct vmw_stdu_dirty, base);
- struct vmw_screen_target_display_unit *stdu =
- container_of(dirty->unit, typeof(*stdu), base);
- struct vmw_stdu_dma *cmd = dirty->cmd;
- struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
- SVGA3dCmdSurfaceDMASuffix *suffix =
- (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
- size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
-
- if (!dirty->num_hits) {
- vmw_cmd_commit(dirty->dev_priv, 0);
- return;
- }
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
- cmd->header.size = sizeof(cmd->body) + blit_size;
- vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
- cmd->body.guest.pitch = ddirty->pitch;
- cmd->body.host.sid = stdu->display_srf->res.id;
- cmd->body.host.face = 0;
- cmd->body.host.mipmap = 0;
- cmd->body.transfer = ddirty->transfer;
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = ddirty->buf->base.base.size;
-
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
- blit_size += sizeof(struct vmw_stdu_update);
-
- vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
- ddirty->left, ddirty->right,
- ddirty->top, ddirty->bottom);
- }
-
- vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
-
- stdu->display_srf->res.res_dirty = true;
- ddirty->left = ddirty->top = S32_MAX;
- ddirty->right = ddirty->bottom = S32_MIN;
-}
-
-
-/**
* vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
*
* @dirty: The closure structure.
return;
- /* Assume we are blitting from Guest (bo) to Host (display_srf) */
+ /* We are blitting from Host (display_srf) to Guest (bo) */
- dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
- dst_bo = &stdu->display_srf->res.backup->base;
- dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
-
- src_pitch = ddirty->pitch;
- src_bo = &ddirty->buf->base;
- src_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp;
+ src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+ src_bo = &stdu->display_srf->res.backup->base;
+ src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
- /* Swap src and dst if the assumption was wrong. */
- if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM) {
- swap(dst_pitch, src_pitch);
- swap(dst_bo, src_bo);
- swap(src_offset, dst_offset);
- }
+ dst_pitch = ddirty->pitch;
+ dst_bo = &ddirty->buf->base;
+ dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
src_bo, src_offset, src_pitch,
width * stdu->cpp, height, &diff);
-
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM &&
- drm_rect_visible(&diff.rect)) {
- struct vmw_private *dev_priv;
- struct vmw_stdu_update *cmd;
- struct drm_clip_rect region;
- int ret;
-
- /* We are updating the actual surface, not a proxy */
- region.x1 = diff.rect.x1;
- region.x2 = diff.rect.x2;
- region.y1 = diff.rect.y1;
- region.y2 = diff.rect.y2;
- ret = vmw_kms_update_proxy(&stdu->display_srf->res, &region,
- 1, 1);
- if (ret)
- goto out_cleanup;
-
-
- dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
- if (!cmd)
- goto out_cleanup;
-
- vmw_stdu_populate_update(cmd, stdu->base.unit,
- region.x1, region.x2,
- region.y1, region.y2);
-
- vmw_cmd_commit(dev_priv, sizeof(*cmd));
- }
-
-out_cleanup:
- ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
- ddirty->right = ddirty->bottom = S32_MIN;
}
/**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
- * framebuffer and the screen target system.
+ * vmw_kms_stdu_readback - Perform a readback from the screen target system
+ * to a buffer-object backed framebuffer.
*
* @dev_priv: Pointer to the device private structure.
* be NULL.
* @num_clips: Number of clip rects in @clips or @vclips.
* @increment: Increment to use when looping over @clips or @vclips.
- * @to_surface: Whether to DMA to the screen target system as opposed to
- * from the screen target system.
- * @interruptible: Whether to perform waits interruptible if possible.
* @crtc: If crtc is passed, perform stdu dma on that crtc only.
*
* If DMA-ing till the screen target system, the function will also notify
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
-int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- uint32_t num_clips,
- int increment,
- bool to_surface,
- bool interruptible,
- struct drm_crtc *crtc)
+int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ struct drm_crtc *crtc)
{
struct vmw_bo *buf =
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
- bool cpu_blit = vmw_stdu_use_cpu_blit(dev_priv);
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
/*
- * VMs without 3D support don't have the surface DMA command and
- * we'll be using a CPU blit, and the framebuffer should be moved out
- * of VRAM.
+ * The GMR domain might seem confusing because it should never be needed
+ * with screen targets, but e.g. the xorg vmware driver issues
+ * CMD_SURFACE_DMA for various pixmap updates which might transition our bo to
+ * a GMR. Instead of forcing another transition we can optimize the readback
+ * by reading directly from the GMR.
*/
- ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR,
+ VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
- ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+ ret = vmw_validation_prepare(&val_ctx, NULL, true);
if (ret)
goto out_unref;
- ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
- SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX;
ddirty.right = ddirty.bottom = S32_MIN;
ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
- ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
- ddirty.base.clip = vmw_stdu_bo_clip;
- ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
- num_clips * sizeof(SVGA3dCopyBox) +
- sizeof(SVGA3dCmdSurfaceDMASuffix);
- if (to_surface)
- ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
-
-
- if (cpu_blit) {
- ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
- ddirty.base.clip = vmw_stdu_bo_cpu_clip;
- ddirty.base.fifo_reserve_size = 0;
- }
+
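+ /* Readback is always done with a CPU blit, so no FIFO space is reserved. */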
+ ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+ ddirty.base.clip = vmw_stdu_bo_cpu_clip;
+ ddirty.base.fifo_reserve_size = 0;
ddirty.base.crtc = crtc;
/*
* This should only happen if the buffer object is too large to create a
* proxy surface for.
- * If we are a 2D VM with a buffer object then we have to use CPU blit
- * so cache these mappings
*/
- if (vps->content_fb_type == SEPARATE_BO &&
- vmw_stdu_use_cpu_blit(dev_priv))
+ if (vps->content_fb_type == SEPARATE_BO)
vps->cpp = new_fb->pitches[0] / new_fb->width;
return 0;
return ret;
}
-static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
- uint32_t num_hits)
-{
- return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
- sizeof(SVGA3dCmdSurfaceDMASuffix) +
- sizeof(struct vmw_stdu_update);
-}
-
static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
sizeof(struct vmw_stdu_update);
}
-static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update,
- void *cmd, uint32_t num_hits)
-{
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer_bo *vfbbo;
- struct vmw_stdu_dma *cmd_dma = cmd;
-
- stdu = container_of(update->du, typeof(*stdu), base);
- vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
-
- cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
- cmd_dma->header.size = sizeof(cmd_dma->body) +
- sizeof(struct SVGA3dCopyBox) * num_hits +
- sizeof(SVGA3dCmdSurfaceDMASuffix);
- vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
- cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
- cmd_dma->body.host.sid = stdu->display_srf->res.id;
- cmd_dma->body.host.face = 0;
- cmd_dma->body.host.mipmap = 0;
- cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
-
- return sizeof(*cmd_dma);
-}
-
-static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update,
- void *cmd, struct drm_rect *clip,
- uint32_t fb_x, uint32_t fb_y)
-{
- struct SVGA3dCopyBox *box = cmd;
-
- box->srcx = fb_x;
- box->srcy = fb_y;
- box->srcz = 0;
- box->x = clip->x1;
- box->y = clip->y1;
- box->z = 0;
- box->w = drm_rect_width(clip);
- box->h = drm_rect_height(clip);
- box->d = 1;
-
- return sizeof(*box);
-}
-
-static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
- void *cmd, struct drm_rect *bb)
-{
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer_bo *vfbbo;
- SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
-
- stdu = container_of(update->du, typeof(*stdu), base);
- vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
-
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = vfbbo->buffer->base.base.size;
-
- vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
- bb->y1, bb->y2);
-
- return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
-}
-
static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update,
void *cmd, uint32_t num_hits)
{
bo_update.base.vfb = vfb;
bo_update.base.out_fence = out_fence;
bo_update.base.mutex = NULL;
- bo_update.base.cpu_blit = vmw_stdu_use_cpu_blit(dev_priv);
bo_update.base.intr = false;
- /*
- * VM without 3D support don't have surface DMA command and framebuffer
- * should be moved out of VRAM.
- */
- if (bo_update.base.cpu_blit) {
- bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
- bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
- bo_update.base.clip = vmw_stdu_bo_clip_cpu;
- bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
- } else {
- bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
- bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
- bo_update.base.clip = vmw_stdu_bo_populate_clip;
- bo_update.base.post_clip = vmw_stdu_bo_populate_update;
- }
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+ bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+ bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
return vmw_du_helper_plane_update(&bo_update.base);
}
srf_update.vfb = vfb;
srf_update.out_fence = out_fence;
srf_update.mutex = &dev_priv->cmdbuf_mutex;
- srf_update.cpu_blit = false;
srf_update.intr = true;
if (vfbs->is_bo_proxy)
.needs_backup = true,
.may_evict = false,
.type_name = "DX streamoutput",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_streamoutput_create,
.destroy = NULL, /* Command buffer managed resource. */
.bind = vmw_dx_streamoutput_bind,
.prio = 1,
.dirty_prio = 1,
.type_name = "legacy surfaces",
- .backup_placement = &vmw_srf_placement,
+ .domain = VMW_BO_DOMAIN_GMR,
+ .busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
.create = &vmw_legacy_srf_create,
.destroy = &vmw_legacy_srf_destroy,
.bind = &vmw_legacy_srf_bind,
.prio = 1,
.dirty_prio = 2,
.type_name = "guest backed surfaces",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_surface_create,
.destroy = vmw_gb_surface_destroy,
.bind = vmw_gb_surface_bind,
}
};
-static const struct ttm_place gmr_vram_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = 0
- }
-};
-
static const struct ttm_place vmw_sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.busy_placement = &vmw_sys_placement_flags
};
-static const struct ttm_place nonfixed_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_SYSTEM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = 0
- }
-};
-
-struct ttm_placement vmw_srf_placement = {
- .num_placement = 1,
- .num_busy_placement = 2,
- .placement = &gmr_placement_flags,
- .busy_placement = gmr_vram_placement_flags
-};
-
struct ttm_placement vmw_mob_placement = {
.num_placement = 1,
.num_busy_placement = 1,
.busy_placement = &mob_placement_flags
};
-struct ttm_placement vmw_nonfixed_placement = {
- .num_placement = 3,
- .placement = nonfixed_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
-
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
.needs_backup = false,
.may_evict = false,
.type_name = "overlay stream",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = NULL,
.destroy = NULL,
.bind = NULL,
**************************************************************************/
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
#include "vmwgfx_validation.h"
#include <linux/slab.h>
* @hash: A hash entry used for the duplicate detection hash table.
* @coherent_count: If switching backup buffers, number of new coherent
* resources that will have this buffer as a backup buffer.
- * @as_mob: Validate as mob.
- * @cpu_blit: Validate for cpu blit access.
*
* Bit fields are used since these structures are allocated and freed in
* large numbers and space conservation is desired.
struct ttm_validate_buffer base;
struct vmwgfx_hash_item hash;
unsigned int coherent_count;
- u32 as_mob : 1;
- u32 cpu_blit : 1;
};
/**
* struct vmw_validation_res_node - Resource validation metadata.
* vmw_validation_add_bo - Add a buffer object to the validation context.
* @ctx: The validation context.
* @vbo: The buffer object.
- * @as_mob: Validate as mob, otherwise suitable for GMR operations.
- * @cpu_blit: Validate in a page-mappable location.
*
* Return: Zero on success, negative error code otherwise.
*/
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
- struct vmw_bo *vbo,
- bool as_mob,
- bool cpu_blit)
+ struct vmw_bo *vbo)
{
struct vmw_validation_bo_node *bo_node;
bo_node = vmw_validation_find_bo_dup(ctx, vbo);
- if (bo_node) {
- if (bo_node->as_mob != as_mob ||
- bo_node->cpu_blit != cpu_blit) {
- DRM_ERROR("Inconsistent buffer usage.\n");
- return -EINVAL;
- }
- } else {
+ if (!bo_node) {
struct ttm_validate_buffer *val_buf;
bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
return -ESRCH;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
- bo_node->as_mob = as_mob;
- bo_node->cpu_blit = cpu_blit;
}
return 0;
if (res->backup) {
struct vmw_bo *vbo = res->backup;
- ret = vmw_validation_add_bo
- (ctx, vbo, vmw_resource_needs_backup(res),
- false);
+ vmw_bo_placement_set(vbo,
+ res->func->domain,
+ res->func->busy_domain);
+ ret = vmw_validation_add_bo(ctx, vbo);
if (ret)
goto out_unreserve;
}
* vmw_validation_bo_validate_single - Validate a single buffer object.
* @bo: The TTM buffer object base.
* @interruptible: Whether to perform waits interruptible if possible.
- * @validate_as_mob: Whether to validate in MOB memory.
*
* Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
* code on failure.
*/
-int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob)
+static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+ bool interruptible)
{
struct vmw_bo *vbo =
container_of(bo, struct vmw_bo, base);
if (vbo->base.pin_count > 0)
return 0;
- if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
-
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
if (ret == 0 || ret == -ERESTARTSYS)
return ret;
- /**
- * If that failed, try VRAM again, this time evicting
+ /*
+ * If that failed, try again, this time evicting
* previous contents.
*/
+ ctx.allow_res_evict = true;
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
- return ret;
+ return ttm_bo_validate(bo, &vbo->placement, &ctx);
}
/**
struct vmw_bo *vbo =
container_of(entry->base.bo, typeof(*vbo), base);
- if (entry->cpu_blit) {
- struct ttm_operation_ctx ttm_ctx = {
- .interruptible = intr,
- .no_wait_gpu = false
- };
-
- ret = ttm_bo_validate(entry->base.bo,
- &vmw_nonfixed_placement, &ttm_ctx);
- } else {
- ret = vmw_validation_bo_validate_single
- (entry->base.bo, intr, entry->as_mob);
- }
+ ret = vmw_validation_bo_validate_single(entry->base.bo, intr);
+
if (ret)
return ret;
if (backup && res->backup && (backup != res->backup)) {
struct vmw_bo *vbo = res->backup;
- ret = vmw_validation_add_bo
- (ctx, vbo, vmw_resource_needs_backup(res),
- false);
+ vmw_bo_placement_set(vbo, res->func->domain,
+ res->func->busy_domain);
+ ret = vmw_validation_add_bo(ctx, vbo);
if (ret)
return ret;
}
}
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
- struct vmw_bo *vbo,
- bool as_mob, bool cpu_blit);
-int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob);
+ struct vmw_bo *vbo);
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
int vmw_validation_add_resource(struct vmw_validation_context *ctx,