drm/vmwgfx: Track context bindings and scrub them upon exiting execbuf
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

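/*
 * Order (log2 of the number of hash buckets) of the per-submission hash
 * table used to look up validation entries for resources and buffer
 * objects.
 */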
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in units of 4 bytes, into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
        struct list_head head;
        const struct vmw_resource *res;
        unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
        struct list_head head;
        struct drm_hash_item hash;
        struct vmw_resource *res;
        struct vmw_dma_buffer *new_backup;
        struct vmw_ctx_binding_state *staged_bindings;
        unsigned long new_backup_offset;
        bool first_usage;
        bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back that validates and processes this command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
        int (*func) (struct vmw_private *, struct vmw_sw_context *,
                     SVGA3dCmdHeader *);
        bool user_allow;
        bool gb_disable;
        bool gb_enable;
};

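/*
 * Helper for building the command dispatch table with C99 designated
 * initializers: each entry is indexed by the command id's offset from
 * SVGA_3D_CMD_BASE.
 */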
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)  \
        [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
                                       (_gb_disable), (_gb_enable)}

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
                                        bool backoff)
{
        struct vmw_resource_val_node *val;

        list_for_each_entry(val, list, head) {
                struct vmw_resource *res = val->res;
                struct vmw_dma_buffer *new_backup =
                        backoff ? NULL : val->new_backup;

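                /*
                 * If the resource is a context, kill (scrub) any bindings
                 * staged for it during this command batch before the
                 * context is unreserved.
                 */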
                if (unlikely(val->staged_bindings)) {
                        vmw_context_binding_state_kill(val->staged_bindings);
                        kfree(val->staged_bindings);
                        val->staged_bindings = NULL;
                }
                vmw_resource_unreserve(res, new_backup,
                        val->new_backup_offset);
                vmw_dmabuf_unreference(&val->new_backup);
        }
}


/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
                                struct vmw_resource *res,
                                struct vmw_resource_val_node **p_node)
{
        struct vmw_resource_val_node *node;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
                                    &hash) == 0)) {
                node = container_of(hash, struct vmw_resource_val_node, hash);
                node->first_usage = false;
                if (unlikely(p_node != NULL))
                        *p_node = node;
                return 0;
        }

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (unlikely(node == NULL)) {
                DRM_ERROR("Failed to allocate a resource validation "
                          "entry.\n");
                return -ENOMEM;
        }

        node->hash.key = (unsigned long) res;
        ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to initialize a resource validation "
                          "entry.\n");
                kfree(node);
                return ret;
        }
        list_add_tail(&node->head, &sw_context->resource_list);
        node->res = vmw_resource_reference(res);
        node->first_usage = true;

        if (unlikely(p_node != NULL))
                *p_node = node;

        return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
                                       const struct vmw_resource *res,
                                       unsigned long offset)
{
        struct vmw_resource_relocation *rel;

        rel = kmalloc(sizeof(*rel), GFP_KERNEL);
        if (unlikely(rel == NULL)) {
                DRM_ERROR("Failed to allocate a resource relocation.\n");
                return -ENOMEM;
        }

        rel->res = res;
        rel->offset = offset;
        list_add_tail(&rel->head, list);

        return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
        struct vmw_resource_relocation *rel, *n;

        list_for_each_entry_safe(rel, n, list, head) {
                list_del(&rel->head);
                kfree(rel);
        }
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
                                           struct list_head *list)
{
        struct vmw_resource_relocation *rel;

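        /*
         * Patch each recorded location with the now-valid device resource
         * id. @offset is in 32-bit units, so the buffer can be indexed
         * directly as an array of uint32_t.
         */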
        list_for_each_entry(rel, list, head)
                cb[rel->offset] = rel->res->id;
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
                                   struct ttm_buffer_object *bo,
                                   bool validate_as_mob,
                                   uint32_t *p_val_node)
{
        uint32_t val_node;
        struct vmw_validate_buffer *vval_buf;
        struct ttm_validate_buffer *val_buf;
        struct drm_hash_item *hash;
        int ret;

        if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
                                    &hash) == 0)) {
                vval_buf = container_of(hash, struct vmw_validate_buffer,
                                        hash);
                if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
                        DRM_ERROR("Inconsistent buffer usage.\n");
                        return -EINVAL;
                }
                val_buf = &vval_buf->base;
                val_node = vval_buf - sw_context->val_bufs;
        } else {
                val_node = sw_context->cur_val_buf;
                if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
                        DRM_ERROR("Max number of DMA buffers per submission "
                                  "exceeded.\n");
                        return -EINVAL;
                }
                vval_buf = &sw_context->val_bufs[val_node];
                vval_buf->hash.key = (unsigned long) bo;
                ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to initialize a buffer validation "
                                  "entry.\n");
                        return ret;
                }
                ++sw_context->cur_val_buf;
                val_buf = &vval_buf->base;
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->reserved = false;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                vval_buf->validate_as_mob = validate_as_mob;
        }

        sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

        if (p_val_node)
                *p_val_node = val_node;

        return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_reserve(res, val->no_buffer_needed);
                if (unlikely(ret != 0))
                        return ret;

                if (res->backup) {
                        struct ttm_buffer_object *bo = &res->backup->base;

                        ret = vmw_bo_to_validate_list
                                (sw_context, bo,
                                 vmw_resource_needs_backup(res), NULL);

                        if (unlikely(ret != 0))
                                return ret;
                }
        }
        return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
        struct vmw_resource_val_node *val;
        int ret;

        list_for_each_entry(val, &sw_context->resource_list, head) {
                struct vmw_resource *res = val->res;

                ret = vmw_resource_validate(res);
                if (unlikely(ret != 0)) {
                        if (ret != -ERESTARTSYS)
                                DRM_ERROR("Failed to validate resource.\n");
                        return ret;
                }
        }
        return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: If non-NULL, points to the validation node of the resource on
 * successful return.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             enum vmw_res_type res_type,
                             const struct vmw_user_resource_conv *converter,
                             uint32_t *id,
                             struct vmw_resource_val_node **p_val)
{
        struct vmw_res_cache_entry *rcache =
                &sw_context->res_cache[res_type];
        struct vmw_resource *res;
        struct vmw_resource_val_node *node;
        int ret;

        if (*id == SVGA3D_INVALID_ID) {
                if (p_val)
                        *p_val = NULL;
                if (res_type == vmw_res_context) {
                        DRM_ERROR("Illegal use of SVGA3D_INVALID_ID as "
                                  "a context id.\n");
                        return -EINVAL;
                }
                return 0;
        }

        /*
         * Fastpath in case of repeated commands referencing the same
         * resource
         */

        if (likely(rcache->valid && *id == rcache->handle)) {
                const struct vmw_resource *res = rcache->res;

                rcache->node->first_usage = false;
                if (p_val)
                        *p_val = rcache->node;

                return vmw_resource_relocation_add
                        (&sw_context->res_relocations, res,
                         id - sw_context->buf_start);
        }

        ret = vmw_user_resource_lookup_handle(dev_priv,
                                              sw_context->tfile,
                                              *id,
                                              converter,
                                              &res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use resource 0x%08x.\n",
                          (unsigned) *id);
                dump_stack();
                return ret;
        }

        rcache->valid = true;
        rcache->res = res;
        rcache->handle = *id;

        ret = vmw_resource_relocation_add(&sw_context->res_relocations,
                                          res,
                                          id - sw_context->buf_start);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        ret = vmw_resource_val_add(sw_context, res, &node);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        rcache->node = node;
        if (p_val)
                *p_val = node;

        if (node->first_usage && res_type == vmw_res_context) {
                node->staged_bindings =
                        kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
                if (node->staged_bindings == NULL) {
                        DRM_ERROR("Failed to allocate context binding "
                                  "information.\n");
                        ret = -ENOMEM;
                        goto out_no_reloc;
                }
                INIT_LIST_HEAD(&node->staged_bindings->list);
        }

        vmw_resource_unreference(&res);
        return 0;

out_no_reloc:
        BUG_ON(sw_context->error_resource != NULL);
        sw_context->error_resource = res;

        return ret;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                __le32 cid;
        } *cmd;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        struct vmw_resource_val_node *ctx_node;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->body.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.target.sid, NULL);
        if (unlikely(ret != 0))
                return ret;

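        /*
         * On guest-backed devices, stage the render-target binding on the
         * context node, so that it can be tracked and scrubbed when
         * leaving execbuf.
         */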
        if (dev_priv->has_mob) {
                struct vmw_ctx_bindinfo bi;

                bi.ctx = ctx_node->res;
                bi.bt = vmw_ctx_binding_rt;
                bi.i1.rt_type = cmd->body.type;
                return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
        }

        return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter,
                                &cmd->body.src.sid, NULL);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;


        cmd = container_of(header, struct vmw_sid_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter, &cmd->body.sid,
                                 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                       struct ttm_buffer_object *new_query_bo,
                                       struct vmw_sw_context *sw_context)
{
        struct vmw_res_cache_entry *ctx_entry =
                &sw_context->res_cache[vmw_res_context];
        int ret;

        BUG_ON(!ctx_entry->valid);
        sw_context->last_query_ctx = ctx_entry->res;

        if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

                if (unlikely(new_query_bo->num_pages > 4)) {
                        DRM_ERROR("Query buffer too large.\n");
                        return -EINVAL;
                }

                if (unlikely(sw_context->cur_query_bo != NULL)) {
                        sw_context->needs_post_query_barrier = true;
                        ret = vmw_bo_to_validate_list(sw_context,
                                                      sw_context->cur_query_bo,
                                                      dev_priv->has_mob, NULL);
                        if (unlikely(ret != 0))
                                return ret;
                }
                sw_context->cur_query_bo = new_query_bo;

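                /*
                 * The dummy query bo is validated as well, since emitting
                 * the query barrier that finalizes the switch may need it.
                 */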
                ret = vmw_bo_to_validate_list(sw_context,
                                              dev_priv->dummy_query_bo,
                                              dev_priv->has_mob, NULL);
                if (unlikely(ret != 0))
                        return ret;

        }

        return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context)
{
        /*
         * The validate list should still hold references to all
         * contexts here.
         */

        if (sw_context->needs_post_query_barrier) {
                struct vmw_res_cache_entry *ctx_entry =
                        &sw_context->res_cache[vmw_res_context];
                struct vmw_resource *ctx;
                int ret;

                BUG_ON(!ctx_entry->valid);
                ctx = ctx_entry->res;

                ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

                if (unlikely(ret != 0))
                        DRM_ERROR("Out of fifo space for dummy query.\n");
        }

        if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
                if (dev_priv->pinned_bo) {
                        vmw_bo_pin(dev_priv->pinned_bo, false);
                        ttm_bo_unref(&dev_priv->pinned_bo);
                }

                if (!sw_context->needs_post_query_barrier) {
                        vmw_bo_pin(sw_context->cur_query_bo, true);

                        /*
                         * We pin also the dummy_query_bo buffer so that we
                         * don't need to validate it when emitting
                         * dummy queries in context destroy paths.
                         */

                        vmw_bo_pin(dev_priv->dummy_query_bo, true);
                        dev_priv->dummy_query_bo_pinned = true;

                        BUG_ON(sw_context->last_query_ctx == NULL);
                        dev_priv->query_cid = sw_context->last_query_ctx->id;
                        dev_priv->query_cid_valid = true;
                        dev_priv->pinned_bo =
                                ttm_bo_reference(sw_context->cur_query_bo);
                }
        }
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGAMobId *id,
                                 struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use MOB buffer.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->mob_loc = id;
        reloc->location = NULL;

        ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
        if (unlikely(ret != 0))
                goto out_no_reloc;

        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        *vmw_bo_p = NULL;
        return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
{
        struct vmw_begin_gb_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginGBQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_gb_query_cmd,
                           header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
                               struct vmw_sw_context *sw_context,
                               SVGA3dCmdHeader *header)
{
        struct vmw_begin_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBeginQuery q;
        } *cmd;

        cmd = container_of(header, struct vmw_begin_query_cmd,
                           header);

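        /*
         * On guest-backed devices the legacy query command is rewritten
         * in place into its guest-backed equivalent and then re-validated.
         */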
        if (unlikely(dev_priv->has_mob)) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdBeginGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
        }

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                 user_context_converter, &cmd->q.cid,
                                 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate an SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context,
                                SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdEndGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate an SVGA_3D_CMD_WAIT_FOR_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForGBQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context,
                                    &cmd->q.mobid,
                                    &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

/**
 * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_FOR_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        if (dev_priv->has_mob) {
                struct {
                        SVGA3dCmdHeader header;
                        SVGA3dCmdWaitForGBQuery q;
                } gb_cmd;

                BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

                gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
                gb_cmd.header.size = cmd->header.size;
                gb_cmd.q.cid = cmd->q.cid;
                gb_cmd.q.type = cmd->q.type;
                gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
                gb_cmd.q.offset = cmd->q.guestResult.offset;

                memcpy(cmd, &gb_cmd, sizeof(*cmd));
                return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
        }

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter, &cmd->dma.host.sid,
                                NULL);
        if (unlikely(ret != 0)) {
                if (unlikely(ret != -ERESTARTSYS))
                        DRM_ERROR("could not find surface for DMA.\n");
                goto out_no_surface;
        }

        srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

        vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        struct vmw_draw_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDrawPrimitives body;
        } *cmd;
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_draw_cmd, header);
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                DRM_ERROR("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &decl->array.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                DRM_ERROR("Illegal number of index ranges.\n");
                return -EINVAL;
        }

        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &range->indexArray.surfaceId, NULL);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}


static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
        } *cmd;

        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
          ((unsigned long) header + header->size + sizeof(header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
        struct vmw_resource_val_node *ctx_node;
        int ret;

        cmd = container_of(header, struct vmw_tex_state_cmd,
                           header);

        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
                                user_context_converter, &cmd->state.cid,
                                &ctx_node);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                        user_surface_converter,
                                        &cur_state->value, NULL);
                if (unlikely(ret != 0))
                        return ret;

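                /*
                 * Stage the texture binding on the context node so that it
                 * can be tracked and scrubbed when leaving execbuf.
                 */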
                if (dev_priv->has_mob) {
                        struct vmw_ctx_bindinfo bi;

                        bi.ctx = ctx_node->res;
                        bi.bt = vmw_ctx_binding_tex;
                        bi.i1.texture_stage = cur_state->stage;
                        vmw_context_binding_add(ctx_node->staged_bindings,
                                                &bi);
                }
        }

        return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      void *buf)
{
        struct vmw_dma_buffer *vmw_bo;
        int ret;

        struct {
                uint32_t header;
                SVGAFifoCmdDefineGMRFB body;
        } *cmd = buf;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->body.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);

        return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 enum vmw_res_type res_type,
                                 const struct vmw_user_resource_conv
                                 *converter,
                                 uint32_t *res_id,
                                 uint32_t *buf_id,
                                 unsigned long backup_offset)
{
        int ret;
        struct vmw_dma_buffer *dma_buf;
        struct vmw_resource_val_node *val_node;

        ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
                                converter, res_id, &val_node);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
        if (unlikely(ret != 0))
                return ret;

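        /*
         * On first use in this batch the bind command itself supplies the
         * backup buffer, so reservation need not allocate one.
         */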
        if (val_node->first_usage)
                val_node->no_buffer_needed = true;

        vmw_dmabuf_unreference(&val_node->new_backup);
        val_node->new_backup = dma_buf;
        val_node->new_backup_offset = backup_offset;

        return 0;
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_bind_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

        return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
                                     user_surface_converter,
                                     &cmd->body.sid, &cmd->body.mobid,
                                     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBImage body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdUpdateGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBImage body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context,
                                       SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdReadbackGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context,
                                       SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdInvalidateGBImage body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_gb_surface_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdInvalidateGBSurface body;
        } *cmd;

        cmd = container_of(header, struct vmw_gb_surface_cmd, header);

        return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                 user_surface_converter,
                                 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
1483 {
1484         struct vmw_set_shader_cmd {
1485                 SVGA3dCmdHeader header;
1486                 SVGA3dCmdSetShader body;
1487         } *cmd;
1488         struct vmw_resource_val_node *ctx_node;
1489         int ret;
1490
1491         cmd = container_of(header, struct vmw_set_shader_cmd,
1492                            header);
1493
1494         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1495                                 user_context_converter, &cmd->body.cid,
1496                                 &ctx_node);
1497         if (unlikely(ret != 0))
1498                 return ret;
1499
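        /*
         * On guest-backed hardware, also validate the shader resource and
         * track the binding in the context's staged bindings so it can be
         * scrubbed once the command batch has been submitted.
         */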
1500         if (dev_priv->has_mob) {
1501                 struct vmw_ctx_bindinfo bi;
1502
1503                 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
1504                                         user_shader_converter,
1505                                         &cmd->body.shid, NULL);
1506                 if (unlikely(ret != 0))
1507                         return ret;
1508
1509                 bi.ctx = ctx_node->res;
1510                 bi.bt = vmw_ctx_binding_shader;
1511                 bi.i1.shader_type = cmd->body.type;
1512                 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1513         }
1514
1515         return 0;
1516 }
1517
1518 /**
1519  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1520  * command
1521  *
1522  * @dev_priv: Pointer to a device private struct.
1523  * @sw_context: The software context being used for this batch.
1524  * @header: Pointer to the command header in the command stream.
1525  */
1526 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1527                                   struct vmw_sw_context *sw_context,
1528                                   SVGA3dCmdHeader *header)
1529 {
1530         struct vmw_bind_gb_shader_cmd {
1531                 SVGA3dCmdHeader header;
1532                 SVGA3dCmdBindGBShader body;
1533         } *cmd;
1534
1535         cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1536                            header);
1537
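        /*
         * Unlike surfaces, shaders may be bound at a byte offset within
         * their backing MOB.
         */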
1538         return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1539                                      user_shader_converter,
1540                                      &cmd->body.shid, &cmd->body.mobid,
1541                                      cmd->body.offsetInBytes);
1542 }
1543
1544 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1545                                 struct vmw_sw_context *sw_context,
1546                                 void *buf, uint32_t *size)
1547 {
1548         uint32_t size_remaining = *size;
1549         uint32_t cmd_id;
1550
1551         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1552         switch (cmd_id) {
1553         case SVGA_CMD_UPDATE:
1554                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1555                 break;
1556         case SVGA_CMD_DEFINE_GMRFB:
1557                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1558                 break;
1559         case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1560                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1561                 break;
1562         case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1563                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1564                 break;
1565         default:
1566                 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1567                 return -EINVAL;
1568         }
1569
1570         if (*size > size_remaining) {
1571                 DRM_ERROR("Invalid SVGA command (size mismatch):"
1572                           " %u.\n", cmd_id);
1573                 return -EINVAL;
1574         }
1575
1576         if (unlikely(!sw_context->kernel)) {
1577                 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1578                 return -EPERM;
1579         }
1580
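        /* SVGA_CMD_DEFINE_GMRFB carries a guest pointer that needs fixup. */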
1581         if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1582                 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1583
1584         return 0;
1585 }
1586
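/*
 * Per-command verifier table, indexed by SVGA3D command id minus
 * SVGA_3D_CMD_BASE. The boolean arguments to VMW_CMD_DEF state whether
 * the command is allowed from user-space, whether it is disallowed on
 * guest-backed hardware, and whether it requires guest-backed hardware
 * (see vmw_cmd_check() below).
 */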
1587 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1588         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1589                     false, false, false),
1590         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1591                     false, false, false),
1592         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1593                     true, false, false),
1594         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1595                     true, false, false),
1596         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1597                     true, false, false),
1598         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1599                     false, false, false),
1600         VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1601                     false, false, false),
1602         VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1603                     true, false, false),
1604         VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1605                     true, false, false),
1606         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1607                     true, false, false),
1608         VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1609                     &vmw_cmd_set_render_target_check, true, false, false),
1610         VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1611                     true, false, false),
1612         VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1613                     true, false, false),
1614         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1615                     true, false, false),
1616         VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1617                     true, false, false),
1618         VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1619                     true, false, false),
1620         VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1621                     true, false, false),
1622         VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1623                     true, false, false),
1624         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1625                     false, false, false),
1626         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
1627                     true, true, false),
1628         VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
1629                     true, true, false),
1630         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1631                     true, false, false),
1632         VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
1633                     true, true, false),
1634         VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1635                     true, false, false),
1636         VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1637                     true, false, false),
1638         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1639                     true, false, false),
1640         VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1641                     true, false, false),
1642         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1643                     true, false, false),
1644         VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1645                     true, false, false),
1646         VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1647                     &vmw_cmd_blt_surf_screen_check, false, false, false),
1648         VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1649                     false, false, false),
1650         VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1651                     false, false, false),
1652         VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1653                     false, false, false),
1654         VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1655                     false, false, false),
1656         VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1657                     false, false, false),
1658         VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1659                     false, false, false),
1660         VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1661                     false, false, false),
1662         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1663                     false, false, false),
1664         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1665                     false, false, false),
1666         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1667                     false, false, false),
1668         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1669                     false, false, false),
1670         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1671                     false, false, false),
1672         VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1673                     false, false, false),
1674         VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1675                     false, false, true),
1676         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1677                     false, false, true),
1678         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1679                     false, false, true),
1680         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1681                     false, false, true),
1682         VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1683                     false, false, true),
1684         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1685                     false, false, true),
1686         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1687                     false, false, true),
1688         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1689                     false, false, true),
1690         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1691                     true, false, true),
1692         VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1693                     false, false, true),
1694         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1695                     true, false, true),
1696         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1697                     &vmw_cmd_update_gb_surface, true, false, true),
1698         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1699                     &vmw_cmd_readback_gb_image, true, false, true),
1700         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
1701                     &vmw_cmd_readback_gb_surface, true, false, true),
1702         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
1703                     &vmw_cmd_invalidate_gb_image, true, false, true),
1704         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
1705                     &vmw_cmd_invalidate_gb_surface, true, false, true),
1706         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
1707                     false, false, true),
1708         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
1709                     false, false, true),
1710         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
1711                     false, false, true),
1712         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
1713                     false, false, true),
1714         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
1715                     false, false, true),
1716         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
1717                     false, false, true),
1718         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
1719                     true, false, true),
1720         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
1721                     false, false, true),
1722         VMW_CMD_DEF(SVGA_3D_CMD_BIND_SHADERCONSTS, &vmw_cmd_invalid,
1723                     false, false, false),
1724         VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
1725                     true, false, true),
1726         VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
1727                     true, false, true),
1728         VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
1729                     true, false, true),
1730         VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
1731                     true, false, true),
1732         VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
1733                     false, false, true),
1734         VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
1735                     false, false, true),
1736         VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
1737                     false, false, true),
1738         VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
1739                     false, false, true),
1740         VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
1741                     false, false, true),
1742         VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
1743                     false, false, true),
1744         VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
1745                     false, false, true),
1746         VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
1747                     false, false, true),
1748         VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
1749                     false, false, true),
1750         VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
1751                     false, false, true),
1752         VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
1753                     true, false, true)
1754 };
1755
1756 static int vmw_cmd_check(struct vmw_private *dev_priv,
1757                          struct vmw_sw_context *sw_context,
1758                          void *buf, uint32_t *size)
1759 {
1760         uint32_t cmd_id;
1761         uint32_t size_remaining = *size;
1762         SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
1763         int ret;
1764         const struct vmw_cmd_entry *entry;
1765         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
1766
1767         cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1768         /* Handle any non-3D commands. */
1769         if (unlikely(cmd_id < SVGA_CMD_MAX))
1770                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
1771
1772
1773         cmd_id = le32_to_cpu(header->id);
1774         *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
1775
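        /* Convert the command id into an index into vmw_cmd_entries. */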
1776         cmd_id -= SVGA_3D_CMD_BASE;
1777         if (unlikely(*size > size_remaining))
1778                 goto out_invalid;
1779
1780         if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
1781                 goto out_invalid;
1782
1783         entry = &vmw_cmd_entries[cmd_id];
1784         if (unlikely(!entry->user_allow && !sw_context->kernel))
1785                 goto out_privileged;
1786
1787         if (unlikely(entry->gb_disable && gb))
1788                 goto out_old;
1789
1790         if (unlikely(entry->gb_enable && !gb))
1791                 goto out_new;
1792
1793         ret = entry->func(dev_priv, sw_context, header);
1794         if (unlikely(ret != 0))
1795                 goto out_invalid;
1796
1797         return 0;
1798 out_invalid:
1799         DRM_ERROR("Invalid SVGA3D command: %d\n",
1800                   cmd_id + SVGA_3D_CMD_BASE);
1801         return -EINVAL;
1802 out_privileged:
1803         DRM_ERROR("Privileged SVGA3D command: %d\n",
1804                   cmd_id + SVGA_3D_CMD_BASE);
1805         return -EPERM;
1806 out_old:
1807         DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
1808                   cmd_id + SVGA_3D_CMD_BASE);
1809         return -EINVAL;
1810 out_new:
1811         DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
1812                   cmd_id + SVGA_3D_CMD_BASE);
1813         return -EINVAL;
1814 }
1815
1816 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
1817                              struct vmw_sw_context *sw_context,
1818                              void *buf,
1819                              uint32_t size)
1820 {
1821         int32_t cur_size = size;
1822         int ret;
1823
1824         sw_context->buf_start = buf;
1825
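        /*
         * Walk the command stream. vmw_cmd_check() validates each command
         * and returns its total size in @size.
         */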
1826         while (cur_size > 0) {
1827                 size = cur_size;
1828                 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
1829                 if (unlikely(ret != 0))
1830                         return ret;
1831                 buf = (void *)((unsigned long) buf + size);
1832                 cur_size -= size;
1833         }
1834
1835         if (unlikely(cur_size != 0)) {
1836                 DRM_ERROR("Command verifier out of sync.\n");
1837                 return -EINVAL;
1838         }
1839
1840         return 0;
1841 }
1842
1843 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
1844 {
1845         sw_context->cur_reloc = 0;
1846 }
1847
1848 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
1849 {
1850         uint32_t i;
1851         struct vmw_relocation *reloc;
1852         struct ttm_validate_buffer *validate;
1853         struct ttm_buffer_object *bo;
1854
1855         for (i = 0; i < sw_context->cur_reloc; ++i) {
1856                 reloc = &sw_context->relocs[i];
1857                 validate = &sw_context->val_bufs[reloc->index].base;
1858                 bo = validate->bo;
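                /*
                 * Patch the command stream according to the buffer's
                 * final placement.
                 */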
1859                 switch (bo->mem.mem_type) {
1860                 case TTM_PL_VRAM:
1861                         reloc->location->offset += bo->offset;
1862                         reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
1863                         break;
1864                 case VMW_PL_GMR:
1865                         reloc->location->gmrId = bo->mem.start;
1866                         break;
1867                 case VMW_PL_MOB:
1868                         *reloc->mob_loc = bo->mem.start;
1869                         break;
1870                 default:
1871                         BUG();
1872                 }
1873         }
1874         vmw_free_relocations(sw_context);
1875 }
1876
1877 /**
1878  * vmw_resource_list_unreference - Free up a resource list and unreference
1879  * all resources referenced by it.
1880  *
1881  * @list: The resource list.
1882  */
1883 static void vmw_resource_list_unreference(struct list_head *list)
1884 {
1885         struct vmw_resource_val_node *val, *val_next;
1886
1887         /*
1888          * Drop references to resources held during command submission.
1889          */
1890
1891         list_for_each_entry_safe(val, val_next, list, head) {
1892                 list_del_init(&val->head);
1893                 vmw_resource_unreference(&val->res);
1894                 if (unlikely(val->staged_bindings))
1895                         kfree(val->staged_bindings);
1896                 kfree(val);
1897         }
1898 }
1899
1900 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
1901 {
1902         struct vmw_validate_buffer *entry, *next;
1903         struct vmw_resource_val_node *val;
1904
1905         /*
1906          * Drop references to DMA buffers held during command submission.
1907          */
1908         list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
1909                                  base.head) {
1910                 list_del(&entry->base.head);
1911                 ttm_bo_unref(&entry->base.bo);
1912                 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
1913                 sw_context->cur_val_buf--;
1914         }
1915         BUG_ON(sw_context->cur_val_buf != 0);
1916
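        /*
         * Remove the resource lookup entries. The resources themselves
         * are unreferenced later, outside of the cmdbuf_mutex.
         */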
1917         list_for_each_entry(val, &sw_context->resource_list, head)
1918                 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
1919 }
1920
1921 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
1922                                       struct ttm_buffer_object *bo,
1923                                       bool validate_as_mob)
1924 {
1925         int ret;
1926
1927
1928         /*
1929          * Don't validate pinned buffers.
1930          */
1931
1932         if (bo == dev_priv->pinned_bo ||
1933             (bo == dev_priv->dummy_query_bo &&
1934              dev_priv->dummy_query_bo_pinned))
1935                 return 0;
1936
1937         if (validate_as_mob)
1938                 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
1939
1940         /*
1941          * Put BO in VRAM if there is space, otherwise as a GMR.
1942          * If there is no space in VRAM and GMR ids are all used up,
1943          * start evicting GMRs to make room. If the DMA buffer can't be
1944          * used as a GMR, this will return -ENOMEM.
1945          */
1946
1947         ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
1948         if (likely(ret == 0 || ret == -ERESTARTSYS))
1949                 return ret;
1950
1951         /*
1952          * If that failed, try VRAM again, this time evicting
1953          * previous contents.
1954          */
1955
1956         DRM_INFO("Falling through to VRAM.\n");
1957         ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
1958         return ret;
1959 }
1960
1961 static int vmw_validate_buffers(struct vmw_private *dev_priv,
1962                                 struct vmw_sw_context *sw_context)
1963 {
1964         struct vmw_validate_buffer *entry;
1965         int ret;
1966
1967         list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
1968                 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
1969                                                  entry->validate_as_mob);
1970                 if (unlikely(ret != 0))
1971                         return ret;
1972         }
1973         return 0;
1974 }
1975
1976 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
1977                                  uint32_t size)
1978 {
1979         if (likely(sw_context->cmd_bounce_size >= size))
1980                 return 0;
1981
1982         if (sw_context->cmd_bounce_size == 0)
1983                 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
1984
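        /* Grow by roughly 1.5x, page-aligned, until the request fits. */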
1985         while (sw_context->cmd_bounce_size < size) {
1986                 sw_context->cmd_bounce_size =
1987                         PAGE_ALIGN(sw_context->cmd_bounce_size +
1988                                    (sw_context->cmd_bounce_size >> 1));
1989         }
1990
1991         if (sw_context->cmd_bounce != NULL)
1992                 vfree(sw_context->cmd_bounce);
1993
1994         sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
1995
1996         if (sw_context->cmd_bounce == NULL) {
1997                 DRM_ERROR("Failed to allocate command bounce buffer.\n");
1998                 sw_context->cmd_bounce_size = 0;
1999                 return -ENOMEM;
2000         }
2001
2002         return 0;
2003 }
2004
2005 /**
2006  * vmw_execbuf_fence_commands - create and submit a command stream fence
2007  *
2008  * Creates a fence object and submits a command stream marker.
2009  * If this fails for some reason, we sync the fifo and return NULL.
2010  * It is then safe to fence buffers with a NULL pointer.
2011  *
2012  * If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
2013  * user-space handle is created; otherwise no handle is created.
2014  */
2015
2016 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2017                                struct vmw_private *dev_priv,
2018                                struct vmw_fence_obj **p_fence,
2019                                uint32_t *p_handle)
2020 {
2021         uint32_t sequence;
2022         int ret;
2023         bool synced = false;
2024
2025         /* p_handle implies file_priv. */
2026         BUG_ON(p_handle != NULL && file_priv == NULL);
2027
2028         ret = vmw_fifo_send_fence(dev_priv, &sequence);
2029         if (unlikely(ret != 0)) {
2030                 DRM_ERROR("Fence submission error. Syncing.\n");
2031                 synced = true;
2032         }
2033
2034         if (p_handle != NULL)
2035                 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2036                                             sequence,
2037                                             DRM_VMW_FENCE_FLAG_EXEC,
2038                                             p_fence, p_handle);
2039         else
2040                 ret = vmw_fence_create(dev_priv->fman, sequence,
2041                                        DRM_VMW_FENCE_FLAG_EXEC,
2042                                        p_fence);
2043
2044         if (unlikely(ret != 0 && !synced)) {
2045                 (void) vmw_fallback_wait(dev_priv, false, false,
2046                                          sequence, false,
2047                                          VMW_FENCE_WAIT_TIMEOUT);
2048                 *p_fence = NULL;
2049         }
2050
2051         return 0;
2052 }
2053
2054 /**
2055  * vmw_execbuf_copy_fence_user - copy fence object information to
2056  * user-space.
2057  *
2058  * @dev_priv: Pointer to a vmw_private struct.
2059  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2060  * @ret: Return value from fence object creation.
2061  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2062  * which the information should be copied.
2063  * @fence: Pointer to the fence object.
2064  * @fence_handle: User-space fence handle.
2065  *
2066  * This function copies fence information to user-space. If copying fails,
2067  * the user-space struct drm_vmw_fence_rep::error member should be
2068  * left untouched, and if user-space has preloaded it with -EFAULT,
2069  * the error should be detected there.
2070  * Also if copying fails, user-space will be unable to signal the fence
2071  * object, so we wait for it immediately and then unreference the
2072  * user-space reference.
2073  */
2074 void
2075 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2076                             struct vmw_fpriv *vmw_fp,
2077                             int ret,
2078                             struct drm_vmw_fence_rep __user *user_fence_rep,
2079                             struct vmw_fence_obj *fence,
2080                             uint32_t fence_handle)
2081 {
2082         struct drm_vmw_fence_rep fence_rep;
2083
2084         if (user_fence_rep == NULL)
2085                 return;
2086
2087         memset(&fence_rep, 0, sizeof(fence_rep));
2088
2089         fence_rep.error = ret;
2090         if (ret == 0) {
2091                 BUG_ON(fence == NULL);
2092
2093                 fence_rep.handle = fence_handle;
2094                 fence_rep.seqno = fence->seqno;
2095                 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2096                 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2097         }
2098
2099         /*
2100          * copy_to_user errors will be detected by user space not
2101          * seeing fence_rep::error filled in. Typically
2102          * user-space would have pre-set that member to -EFAULT.
2103          */
2104         ret = copy_to_user(user_fence_rep, &fence_rep,
2105                            sizeof(fence_rep));
2106
2107         /*
2108          * User-space lost the fence object. We need to sync
2109          * and unreference the handle.
2110          */
2111         if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2112                 ttm_ref_object_base_unref(vmw_fp->tfile,
2113                                           fence_handle, TTM_REF_USAGE);
2114                 DRM_ERROR("Fence copy error. Syncing.\n");
2115                 (void) vmw_fence_obj_wait(fence, fence->signal_mask,
2116                                           false, false,
2117                                           VMW_FENCE_WAIT_TIMEOUT);
2118         }
2119 }
2120
2121 int vmw_execbuf_process(struct drm_file *file_priv,
2122                         struct vmw_private *dev_priv,
2123                         void __user *user_commands,
2124                         void *kernel_commands,
2125                         uint32_t command_size,
2126                         uint64_t throttle_us,
2127                         struct drm_vmw_fence_rep __user *user_fence_rep,
2128                         struct vmw_fence_obj **out_fence)
2129 {
2130         struct vmw_sw_context *sw_context = &dev_priv->ctx;
2131         struct vmw_fence_obj *fence = NULL;
2132         struct vmw_resource *error_resource;
2133         struct list_head resource_list;
2134         struct ww_acquire_ctx ticket;
2135         uint32_t handle;
2136         void *cmd;
2137         int ret;
2138
2139         ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2140         if (unlikely(ret != 0))
2141                 return -ERESTARTSYS;
2142
2143         if (kernel_commands == NULL) {
2144                 sw_context->kernel = false;
2145
2146                 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2147                 if (unlikely(ret != 0))
2148                         goto out_unlock;
2149
2150
2151                 ret = copy_from_user(sw_context->cmd_bounce,
2152                                      user_commands, command_size);
2153
2154                 if (unlikely(ret != 0)) {
2155                         ret = -EFAULT;
2156                         DRM_ERROR("Failed copying commands.\n");
2157                         goto out_unlock;
2158                 }
2159                 kernel_commands = sw_context->cmd_bounce;
2160         } else
2161                 sw_context->kernel = true;
2162
2163         sw_context->tfile = vmw_fpriv(file_priv)->tfile;
2164         sw_context->cur_reloc = 0;
2165         sw_context->cur_val_buf = 0;
2166         sw_context->fence_flags = 0;
2167         INIT_LIST_HEAD(&sw_context->resource_list);
2168         sw_context->cur_query_bo = dev_priv->pinned_bo;
2169         sw_context->last_query_ctx = NULL;
2170         sw_context->needs_post_query_barrier = false;
2171         memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2172         INIT_LIST_HEAD(&sw_context->validate_nodes);
2173         INIT_LIST_HEAD(&sw_context->res_relocations);
2174         if (!sw_context->res_ht_initialized) {
2175                 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2176                 if (unlikely(ret != 0))
2177                         goto out_unlock;
2178                 sw_context->res_ht_initialized = true;
2179         }
2180
2181         INIT_LIST_HEAD(&resource_list);
2182         ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2183                                 command_size);
2184         if (unlikely(ret != 0))
2185                 goto out_err;
2186
2187         ret = vmw_resources_reserve(sw_context);
2188         if (unlikely(ret != 0))
2189                 goto out_err;
2190
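        /*
         * Reserve all buffer objects under a single ww acquire ticket to
         * resolve reservation conflicts with concurrent submitters.
         */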
2191         ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2192         if (unlikely(ret != 0))
2193                 goto out_err;
2194
2195         ret = vmw_validate_buffers(dev_priv, sw_context);
2196         if (unlikely(ret != 0))
2197                 goto out_err;
2198
2199         ret = vmw_resources_validate(sw_context);
2200         if (unlikely(ret != 0))
2201                 goto out_err;
2202
2203         if (throttle_us) {
2204                 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2205                                    throttle_us);
2206
2207                 if (unlikely(ret != 0))
2208                         goto out_err;
2209         }
2210
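        /*
         * Reserve fifo space, patch the command stream with the final
         * buffer object and resource ids, and commit it to the device.
         */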
2211         cmd = vmw_fifo_reserve(dev_priv, command_size);
2212         if (unlikely(cmd == NULL)) {
2213                 DRM_ERROR("Failed reserving fifo space for commands.\n");
2214                 ret = -ENOMEM;
2215                 goto out_err;
2216         }
2217
2218         vmw_apply_relocations(sw_context);
2219         memcpy(cmd, kernel_commands, command_size);
2220
2221         vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2222         vmw_resource_relocations_free(&sw_context->res_relocations);
2223
2224         vmw_fifo_commit(dev_priv, command_size);
2225
2226         vmw_query_bo_switch_commit(dev_priv, sw_context);
2227         ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2228                                          &fence,
2229                                          (user_fence_rep) ? &handle : NULL);
2230         /*
2231          * This error is harmless, because if fence submission fails,
2232          * vmw_fifo_send_fence will sync. The error will be propagated to
2233          * user-space in @user_fence_rep.
2234          */
2235
2236         if (ret != 0)
2237                 DRM_ERROR("Fence submission error. Syncing.\n");
2238
2239         vmw_resource_list_unreserve(&sw_context->resource_list, false);
2240         ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2241                                     (void *) fence);
2242
2243         if (unlikely(dev_priv->pinned_bo != NULL &&
2244                      !dev_priv->query_cid_valid))
2245                 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2246
2247         vmw_clear_validations(sw_context);
2248         vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2249                                     user_fence_rep, fence, handle);
2250
2251         /* Don't unreference when handing fence out */
2252         if (unlikely(out_fence != NULL)) {
2253                 *out_fence = fence;
2254                 fence = NULL;
2255         } else if (likely(fence != NULL)) {
2256                 vmw_fence_obj_unreference(&fence);
2257         }
2258
2259         list_splice_init(&sw_context->resource_list, &resource_list);
2260         mutex_unlock(&dev_priv->cmdbuf_mutex);
2261
2262         /*
2263          * Unreference resources outside of the cmdbuf_mutex to
2264          * avoid deadlocks in resource destruction paths.
2265          */
2266         vmw_resource_list_unreference(&resource_list);
2267
2268         return 0;
2269
2270 out_err:
2271         vmw_resource_relocations_free(&sw_context->res_relocations);
2272         vmw_free_relocations(sw_context);
2273         ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2274         vmw_resource_list_unreserve(&sw_context->resource_list, true);
2275         vmw_clear_validations(sw_context);
2276         if (unlikely(dev_priv->pinned_bo != NULL &&
2277                      !dev_priv->query_cid_valid))
2278                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2279 out_unlock:
2280         list_splice_init(&sw_context->resource_list, &resource_list);
2281         error_resource = sw_context->error_resource;
2282         sw_context->error_resource = NULL;
2283         mutex_unlock(&dev_priv->cmdbuf_mutex);
2284
2285         /*
2286          * Unreference resources outside of the cmdbuf_mutex to
2287          * avoid deadlocks in resource destruction paths.
2288          */
2289         vmw_resource_list_unreference(&resource_list);
2290         if (unlikely(error_resource != NULL))
2291                 vmw_resource_unreference(&error_resource);
2292
2293         return ret;
2294 }
2295
2296 /**
2297  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2298  *
2299  * @dev_priv: The device private structure.
2300  *
2301  * This function is called to idle the fifo and unpin the query buffer
2302  * if the normal way to do this hits an error, which should typically be
2303  * extremely rare.
2304  */
2305 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2306 {
2307         DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2308
2309         (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2310         vmw_bo_pin(dev_priv->pinned_bo, false);
2311         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2312         dev_priv->dummy_query_bo_pinned = false;
2313 }
2314
2315
2316 /**
2317  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2318  * query bo.
2319  *
2320  * @dev_priv: The device private structure.
2321  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2322  * _after_ a query barrier that flushes all queries touching the current
2323  * buffer pointed to by @dev_priv->pinned_bo
2324  *
2325  * This function should be used to unpin the pinned query bo, or
2326  * as a query barrier when we need to make sure that all queries have
2327  * finished before the next fifo command. (For example on hardware
2328  * context destructions where the hardware may otherwise leak unfinished
2329  * queries).
2330  *
2331  * This function does not return any failure codes, but makes attempts
2332  * to do safe unpinning in case of errors.
2333  *
2334  * The function will synchronize on the previous query barrier, and will
2335  * thus not finish until that barrier has executed.
2336  *
2337  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2338  * before calling this function.
2339  */
2340 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2341                                      struct vmw_fence_obj *fence)
2342 {
2343         int ret = 0;
2344         struct list_head validate_list;
2345         struct ttm_validate_buffer pinned_val, query_val;
2346         struct vmw_fence_obj *lfence = NULL;
2347         struct ww_acquire_ctx ticket;
2348
2349         if (dev_priv->pinned_bo == NULL)
2350                 goto out_unlock;
2351
2352         INIT_LIST_HEAD(&validate_list);
2353
2354         pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2355         list_add_tail(&pinned_val.head, &validate_list);
2356
2357         query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2358         list_add_tail(&query_val.head, &validate_list);
2359
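        /* The reservation may be interrupted by a signal; simply retry. */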
2360         do {
2361                 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2362         } while (ret == -ERESTARTSYS);
2363
2364         if (unlikely(ret != 0)) {
2365                 vmw_execbuf_unpin_panic(dev_priv);
2366                 goto out_no_reserve;
2367         }
2368
2369         if (dev_priv->query_cid_valid) {
2370                 BUG_ON(fence != NULL);
2371                 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2372                 if (unlikely(ret != 0)) {
2373                         vmw_execbuf_unpin_panic(dev_priv);
2374                         goto out_no_emit;
2375                 }
2376                 dev_priv->query_cid_valid = false;
2377         }
2378
2379         vmw_bo_pin(dev_priv->pinned_bo, false);
2380         vmw_bo_pin(dev_priv->dummy_query_bo, false);
2381         dev_priv->dummy_query_bo_pinned = false;
2382
2383         if (fence == NULL) {
2384                 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2385                                                   NULL);
2386                 fence = lfence;
2387         }
2388         ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2389         if (lfence != NULL)
2390                 vmw_fence_obj_unreference(&lfence);
2391
2392         ttm_bo_unref(&query_val.bo);
2393         ttm_bo_unref(&pinned_val.bo);
2394         ttm_bo_unref(&dev_priv->pinned_bo);
2395
2396 out_unlock:
2397         return;
2398
2399 out_no_emit:
2400         ttm_eu_backoff_reservation(&ticket, &validate_list);
2401 out_no_reserve:
2402         ttm_bo_unref(&query_val.bo);
2403         ttm_bo_unref(&pinned_val.bo);
2404         ttm_bo_unref(&dev_priv->pinned_bo);
2405 }
2406
2407 /**
2408  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2409  * query bo.
2410  *
2411  * @dev_priv: The device private structure.
2412  *
2413  * This function should be used to unpin the pinned query bo, or
2414  * as a query barrier when we need to make sure that all queries have
2415  * finished before the next fifo command. (For example on hardware
2416  * context destructions where the hardware may otherwise leak unfinished
2417  * queries).
2418  *
2419  * This function does not return any failure codes, but makes attempts
2420  * to do safe unpinning in case of errors.
2421  *
2422  * The function will synchronize on the previous query barrier, and will
2423  * thus not finish until that barrier has executed.
2424  */
2425 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2426 {
2427         mutex_lock(&dev_priv->cmdbuf_mutex);
2428         if (dev_priv->query_cid_valid)
2429                 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2430         mutex_unlock(&dev_priv->cmdbuf_mutex);
2431 }
2432
2433
2434 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2435                       struct drm_file *file_priv)
2436 {
2437         struct vmw_private *dev_priv = vmw_priv(dev);
2438         struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2439         struct vmw_master *vmaster = vmw_master(file_priv->master);
2440         int ret;
2441
2442         /*
2443          * This will allow us to extend the ioctl argument while
2444          * maintaining backwards compatibility:
2445          * We take different code paths depending on the value of
2446          * arg->version.
2447          */
2448
2449         if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2450                 DRM_ERROR("Incorrect execbuf version.\n");
2451                 DRM_ERROR("You're running outdated experimental "
2452                           "vmwgfx user-space drivers.\n");
2453                 return -EINVAL;
2454         }
2455
2456         ret = ttm_read_lock(&vmaster->lock, true);
2457         if (unlikely(ret != 0))
2458                 return ret;
2459
2460         ret = vmw_execbuf_process(file_priv, dev_priv,
2461                                   (void __user *)(unsigned long)arg->commands,
2462                                   NULL, arg->command_size, arg->throttle_us,
2463                                   (void __user *)(unsigned long)arg->fence_rep,
2464                                   NULL);
2465
2466         if (unlikely(ret != 0))
2467                 goto out_unlock;
2468
2469         vmw_kms_cursor_post_execbuf(dev_priv);
2470
2471 out_unlock:
2472         ttm_read_unlock(&vmaster->lock);
2473         return ret;
2474 }