static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- uint32_t sid)
+ uint32_t *sid)
{
- if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
- sid != SVGA3D_INVALID_ID)) {
- int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);
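+ /**
+ * SVGA3D_INVALID_ID means no surface is bound, so there is
+ * nothing to validate or translate.
+ */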
+ if (*sid == SVGA3D_INVALID_ID)
+ return 0;
+
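+ /**
+ * Cache the most recently validated handle and the device id
+ * it translates to, so that back-to-back commands referencing
+ * the same sid skip the lookup.
+ */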
+ if (unlikely((!sw_context->sid_valid ||
+ *sid != sw_context->last_sid))) {
+ int real_id;
+ int ret = vmw_surface_check(dev_priv, sw_context->tfile,
+ *sid, &real_id);
if (unlikely(ret != 0)) {
- DRM_ERROR("Could ot find or use surface %u\n",
- (unsigned) sid);
+ DRM_ERROR("Could ot find or use surface 0x%08x "
+ "address 0x%08lx\n",
+ (unsigned int) *sid,
+ (unsigned long) sid);
return ret;
}
- sw_context->last_sid = sid;
+ sw_context->last_sid = *sid;
sw_context->sid_valid = true;
- }
+ *sid = real_id;
+ sw_context->sid_translation = real_id;
+ } else
+ *sid = sw_context->sid_translation;
+
return 0;
}
return ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+ return ret;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
+ ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
if (unlikely(ret != 0))
return ret;
- return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
- return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
- return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
+ return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
uint32_t cur_validate_node;
struct ttm_validate_buffer *val_buf;
-
cmd = container_of(header, struct vmw_dma_cmd, header);
- ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
- if (unlikely(ret != 0))
- return ret;
-
handle = cmd->dma.guest.ptr.gmrId;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
++sw_context->cur_val_buf;
}
- ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
- cmd->dma.host.sid, &srf);
+ ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+ cmd->dma.host.sid, &srf);
if (ret) {
DRM_ERROR("could not find surface\n");
goto out_no_reloc;
}
+ /**
+ * Patch command stream with device SID.
+ */
+
+ cmd->dma.host.sid = srf->res.id;
vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
vmw_surface_unreference(&srf);
out_no_reloc:
return ret;
}
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_draw_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDrawPrimitives body;
+ } *cmd;
+ SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+ (unsigned long)header + sizeof(*cmd));
+ SVGA3dPrimitiveRange *range;
+ uint32_t i;
+ uint32_t maxnum;
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
+ cmd = container_of(header, struct vmw_draw_cmd, header);
+ maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
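+ /**
+ * Reject commands whose declared number of vertex
+ * declarations would not fit inside the command body.
+ */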
+ if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+ DRM_ERROR("Illegal number of vertex declarations.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &decl->array.surfaceId);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
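+ /**
+ * The primitive ranges follow the vertex declarations;
+ * bound their count by the space that remains.
+ */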
+ maxnum = (header->size - sizeof(cmd->body) -
+ cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+ if (unlikely(cmd->body.numRanges > maxnum)) {
+ DRM_ERROR("Illegal number of index ranges.\n");
+ return -EINVAL;
+ }
+
+ range = (SVGA3dPrimitiveRange *) decl;
+ for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &range->indexArray.surfaceId);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+ return 0;
+}
+
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_tex_state_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetTextureState state;
+ };
+
+ SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+ ((unsigned long) header + header->size + sizeof(header));
+ SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+ ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ int ret;
+
+ ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+ if (unlikely(ret != 0))
+ return ret;
+
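+ /**
+ * Only SVGA3D_TS_BIND_TEXTURE states carry a surface id that
+ * needs to be validated and translated.
+ */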
+ for (; cur_state < last_state; ++cur_state) {
+ if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+ continue;
+
+ ret = vmw_cmd_sid_check(dev_priv, sw_context,
+ &cur_state->value);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
+
typedef int (*vmw_cmd_func) (struct vmw_private *,
struct vmw_sw_context *,
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
&vmw_cmd_set_render_target_check),
- VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
- VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
+ VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
void *buf, uint32_t *size)
{
uint32_t cmd_id;
+ uint32_t size_remaining = *size;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
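+ /**
+ * Reject commands whose declared size runs past the end of
+ * the submitted buffer.
+ */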
+ if (unlikely(*size > size_remaining))
+ goto out_err;
+
if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
goto out_err;
int ret;
while (cur_size > 0) {
+ size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
if (unlikely(ret != 0))
return ret;
kfree(user_srf);
}
-int vmw_user_surface_lookup(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int sid, struct vmw_surface **out)
+int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle, struct vmw_surface **out)
{
struct vmw_resource *res;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
- res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid);
- if (unlikely(res == NULL))
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
return -EINVAL;
- if (res->res_free != &vmw_user_surface_free)
- return -EINVAL;
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
- srf = container_of(res, struct vmw_surface, res);
- user_srf = container_of(srf, struct vmw_user_surface, srf);
- if (user_srf->base.tfile != tfile && !user_srf->base.shareable)
- return -EPERM;
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
+ res = &srf->res;
+
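+ /**
+ * Check under the resource lock that the resource is still
+ * available and really is a user surface before taking a
+ * reference.
+ */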
+ read_lock(&dev_priv->resource_lock);
+
+ if (!res->avail || res->res_free != &vmw_user_surface_free) {
+ read_unlock(&dev_priv->resource_lock);
+ goto out_bad_resource;
+ }
+
+ kref_get(&res->kref);
+ read_unlock(&dev_priv->resource_lock);
*out = srf;
- return 0;
+ ret = 0;
+
+out_bad_resource:
+ ttm_base_object_unref(&base);
+
+ return ret;
}
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_resource *res;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret = 0;
-
- res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid);
- if (unlikely(res == NULL))
- return -EINVAL;
-
- if (res->res_free != &vmw_user_surface_free) {
- ret = -EINVAL;
- goto out;
- }
- srf = container_of(res, struct vmw_surface, res);
- user_srf = container_of(srf, struct vmw_user_surface, srf);
- if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
- ret = -EPERM;
- goto out;
- }
-
- ttm_ref_object_base_unref(tfile, user_srf->base.hash.key,
- TTM_REF_USAGE);
-out:
- vmw_resource_unreference(&res);
- return ret;
+ return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf->snooper.crtc = NULL;
- rep->sid = res->id;
+ rep->sid = user_srf->base.hash.key;
+ if (rep->sid == SVGA3D_INVALID_ID)
+ DRM_ERROR("Created bad Surface ID.\n");
+
vmw_resource_unreference(&res);
return 0;
out_err1:
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_surface_reference_arg *arg =
(union drm_vmw_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_surface_create_req *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_resource *res;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct drm_vmw_size __user *user_sizes;
- int ret;
+ struct ttm_base_object *base;
+ int ret = -EINVAL;
- res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid);
- if (unlikely(res == NULL))
+ base = ttm_base_object_lookup(tfile, req->sid);
+ if (unlikely(base == NULL)) {
+ DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;
-
- if (res->res_free != &vmw_user_surface_free) {
- ret = -EINVAL;
- goto out;
}
- srf = container_of(res, struct vmw_surface, res);
- user_srf = container_of(srf, struct vmw_user_surface, srf);
- if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
- DRM_ERROR("Tried to reference none shareable surface\n");
- ret = -EPERM;
- goto out;
- }
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_resource;
+
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ srf = &user_srf->srf;
ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
- goto out;
+ goto out_no_reference;
}
rep->flags = srf->flags;
if (user_sizes)
ret = copy_to_user(user_sizes, srf->sizes,
srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
+ if (unlikely(ret != 0))
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
- /**
- * FIXME: Unreference surface here?
- */
- goto out;
- }
-out:
- vmw_resource_unreference(&res);
+out_bad_resource:
+out_no_reference:
+ ttm_base_object_unref(&base);
+
return ret;
}
int vmw_surface_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
- int id)
+ uint32_t handle, int *id)
{
- struct vmw_resource *res;
- int ret = 0;
+ struct ttm_base_object *base;
+ struct vmw_user_surface *user_srf;
- read_lock(&dev_priv->resource_lock);
- res = idr_find(&dev_priv->surface_idr, id);
- if (res && res->avail) {
- struct vmw_surface *srf =
- container_of(res, struct vmw_surface, res);
- struct vmw_user_surface *usrf =
- container_of(srf, struct vmw_user_surface, srf);
+ int ret = -EPERM;
- if (usrf->base.tfile != tfile && !usrf->base.shareable)
- ret = -EPERM;
- } else
- ret = -EINVAL;
- read_unlock(&dev_priv->resource_lock);
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
+ return -EINVAL;
+
+ if (unlikely(base->object_type != VMW_RES_SURFACE))
+ goto out_bad_surface;
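+ /**
+ * Return the device surface id so the caller can patch it
+ * into the command stream.
+ */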
+ user_srf = container_of(base, struct vmw_user_surface, base);
+ *id = user_srf->srf.res.id;
+ ret = 0;
+
+out_bad_surface:
+ /**
+ * FIXME: May deadlock here when called from the
+ * command parsing code.
+ */
+
+ ttm_base_object_unref(&base);
return ret;
}