/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

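/*
 * Order (log2 of the bucket count) of the software context's resource-
 * and buffer-object lookup hash table: drm_ht_create() is handed this
 * value below, giving 2^12 = 4096 buckets.
 */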
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to verify the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled only if guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}

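/*
 * Example (see the command table further down): an entry such as
 *
 *   VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *               true, false, false)
 *
 * expands to the designated initializer
 *
 *   [SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE] =
 *           {&vmw_cmd_surface_copy_check, true, false, false}
 *
 * so vmw_cmd_check() can index vmw_cmd_entries[] directly by command id.
 */
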
/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		if (unlikely(val->staged_bindings)) {
			vmw_context_binding_state_kill(val->staged_bindings);
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);
	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head)
		cb[rel->offset] = rel->res->id;
}

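/*
 * The offsets stored by vmw_cmd_res_check() below are computed as
 * (id - sw_context->buf_start), i.e. in units of 32-bit words from the
 * start of the command buffer, which is why @cb can be indexed directly
 * with them here.
 */
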
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	/*
	 * GNU "elvis" operator: privileged callers get the (nonzero)
	 * capable() result, everyone else gets -EINVAL. Either way the
	 * command is rejected.
	 */
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

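/*
 * All verifier call-backs share the vmw_cmd_entry::func signature: they
 * take the device private, the software context and the command header,
 * and return 0 if the command is safe to submit. vmw_cmd_invalid and
 * vmw_cmd_ok above are the trivial "always reject" / "always accept"
 * instances used by the command table below.
 */
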
/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Optional pointer to the looked-up validation node.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     enum vmw_res_type res_type,
			     const struct vmw_user_resource_conv *converter,
			     uint32_t *id,
			     struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->tfile,
					      *id,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id);
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;

	if (node->first_usage && res_type == vmw_res_context) {
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = -ENOMEM;
			goto out_no_reloc;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

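/*
 * Note: on the error path above, the reference obtained from
 * vmw_user_resource_lookup_handle() is handed over to
 * sw_context->error_resource rather than dropped, so the submission
 * path can report and unreference it after backing off.
 */
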
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function checks if we're switching query buffers, and if so,
 * issues a dummy occlusion query wait used as a query barrier. When the
 * fence object following that query wait has signaled, we are sure that
 * all preceding queries have finished, and the old query buffer can be
 * unpinned. However, since both the new query buffer and the old one are
 * fenced with that fence, we can do an asynchronous unpin now, and be
 * sure that the old query buffer won't be moved until the fence has
 * signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

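/*
 * A relocation created here patches a MOB id: reloc->mob_loc points at
 * the id in the command stream and reloc->location stays NULL. Compare
 * vmw_translate_guest_ptr() below, which instead sets reloc->location
 * to the SVGAGuestPtr. vmw_apply_relocations() later picks the fixup
 * based on the memory type the buffer actually validated into.
 */
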
/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		/* Rewrite the legacy command in place as its GB variant. */
		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("Could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, NULL);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, NULL);
		if (unlikely(ret != 0))
			return ret;

		bi.ctx = ctx_node->res;
		bi.bt = vmw_ctx_binding_shader;
		bi.i1.shader_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_SHADERCONSTS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
};

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);


	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;
out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		if (unlikely(val->staged_bindings))
			kfree(val->staged_bindings);
		kfree(val);
	}
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool validate_as_mob)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */

	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

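/*
 * Growth above is geometric (factor 1.5, page-aligned). For example,
 * assuming a 32 KiB VMWGFX_CMD_BOUNCE_INIT_SIZE, a request for 64 KiB
 * would grow the bounce buffer 32 KiB -> 48 KiB -> 72 KiB and stop.
 */
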
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member should be left
 * untouched, and if user-space has preloaded it with -EFAULT the failure
 * will be detected there.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then drop the user-space
 * reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user-space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
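/*
 * User-space side of the -EFAULT convention described above (a sketch;
 * the drm_vmw_fence_rep layout is uapi, the surrounding code is assumed):
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;	// preload so a lost copy is detectable
 *	arg.fence_rep = (unsigned long) &rep;
 *	// ... submit the execbuf ioctl ...
 *	if (rep.error == 0)
 *		use_and_unref_fence(rep.handle);  // hypothetical helper
 */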
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	void *cmd;
	int ret;
	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;
	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (unlikely(ret != 0))
			goto out_err;
	}
	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);

	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);
	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");
	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing the fence out. */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}
	list_splice_init(&sw_context->resource_list, &resource_list);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;
out_err:
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}
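/*
 * In-kernel caller sketch (illustrative; the kms code submits commands in
 * roughly this way): a non-NULL kernel_commands skips the user copy and
 * bounce buffer, and a non-NULL out_fence transfers the fence reference
 * to the caller, who must unreference it.
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, cmd_size,
 *				  0, NULL, &fence);
 *	if (ret == 0 && fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 */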
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}
/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo.
 *
 * This function should be used to unpin the pinned query bo, or as a
 * query barrier when we need to make sure that all queries have finished
 * before the next fifo command. (For example on hardware context
 * destructions, where the hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);
	do {
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}
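/*
 * Design note: the dummy query emitted above serves as the query barrier;
 * fencing the two-buffer validate list on the resulting fence is what
 * guarantees the pinned bo is idle before it is unpinned and released.
 * Barrier-style use at context teardown typically looks like the sketch
 * below, which is exactly what the locked wrapper that follows does:
 *
 *	mutex_lock(&dev_priv->cmdbuf_mutex);
 *	__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
 *	mutex_unlock(&dev_priv->cmdbuf_mutex);
 */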
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a
 * query barrier when we need to make sure that all queries have finished
 * before the next fifo command. (For example on hardware context
 * destructions, where the hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */
	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);
	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
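/*
 * User-space invocation sketch (illustrative; the argument layout follows
 * the drm_vmw uapi, while cmd_buf/cmd_size and the fence_rep handling are
 * assumed from the conventions documented above):
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (unsigned long) cmd_buf,
 *		.command_size = cmd_size,
 *		.throttle_us = 0,
 *		.fence_rep = (unsigned long) &rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *	};
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */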