2 * Copyright 2007 Jérôme Glisse
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
25 * Jerome Glisse <glisse@freedesktop.org>
27 #include "radeon_ms.h"
/*
 * amd_cmd_bo_cleanup - drop the lookup references taken on every buffer
 * object tracked by @cmd, walking both the unused and the used lists.
 *
 * NOTE(review): this extracted chunk is missing lines (signature tail,
 * braces, loop closings); only comments were added, code is untouched.
 * NOTE(review): entries are dereferenced inside list_for_each_entry();
 * this is only safe if drm_bo_usage_deref_locked() cannot free the list
 * node being iterated — confirm, otherwise list_for_each_entry_safe()
 * (with list_del) would be required.
 */
30 static inline void amd_cmd_bo_cleanup(struct drm_device *dev,
33 struct amd_cmd_bo *bo;
/* struct_mutex protects the buffer-object refcounts (deref_locked). */
35 mutex_lock(&dev->struct_mutex);
36 list_for_each_entry(bo, &cmd->bo_unused.list, list) {
37 drm_bo_usage_deref_locked(&bo->bo);
39 list_for_each_entry(bo, &cmd->bo_used.list, list) {
40 drm_bo_usage_deref_locked(&bo->bo);
42 mutex_unlock(&dev->struct_mutex);
/*
 * amd_cmd_bo_validate - validate a single buffer object referenced by the
 * command stream and copy the validation result back to userspace.
 *
 * Only INDIRECT, DATA and RING command buffers are validated; other bo
 * types fall through (the default path is missing from this extract).
 * The userspace request must carry op == drm_bo_validate, otherwise the
 * call is rejected.
 *
 * NOTE(review): extract is missing lines (returns, braces, trailing
 * drm_bo_handle_validate() arguments); only comments were added.
 * NOTE(review): the cast `(void __user *)((unsigned)data)` below truncates
 * the 64-bit user address to 32 bits — broken on 64-bit kernels; should be
 * (unsigned long) before the pointer cast.
 */
45 static inline int amd_cmd_bo_validate(struct drm_device *dev,
46 struct drm_file *file,
47 struct amd_cmd_bo *cmd_bo,
48 struct drm_amd_cmd_bo *bo,
53 /* validate only cmd indirect or data bo */
55 case DRM_AMD_CMD_BO_TYPE_CMD_INDIRECT:
56 case DRM_AMD_CMD_BO_TYPE_DATA:
57 case DRM_AMD_CMD_BO_TYPE_CMD_RING:
58 /* FIXME: make sure userspace can no longer map the bo */
63 /* check that buffer operation is validate */
64 if (bo->op_req.op != drm_bo_validate) {
65 DRM_ERROR("buffer 0x%x object operation is not validate.\n",
/* zero the reply before validation fills it in */
70 memset(&bo->op_rep, 0, sizeof(struct drm_bo_arg_rep));
71 ret = drm_bo_handle_validate(file,
72 bo->op_req.bo_req.handle,
73 bo->op_req.bo_req.flags,
74 bo->op_req.bo_req.mask,
75 bo->op_req.bo_req.hint,
76 bo->op_req.bo_req.fence_class,
80 DRM_ERROR("validate error %d for 0x%08x\n",
/* hand the validation reply (op_rep) back to the submitting client */
84 if (copy_to_user((void __user *)((unsigned)data), bo,
85 sizeof(struct drm_amd_cmd_bo))) {
86 DRM_ERROR("failed to copy to user validate result of 0x%08x\n",
/*
 * amd_cmd_parse_cmd_bo - walk the userspace-provided linked list of
 * drm_amd_cmd_bo descriptors, look up each buffer object, validate it,
 * and sort it onto cmd->bo_used / cmd->bo_unused according to its type.
 * The RING-type buffer additionally becomes cmd->cdw_bo (the command
 * dword buffer checked and emitted later).
 *
 * The list is bounded by drm_amd_cmd->bo_count: parsing fails both if
 * userspace supplies more entries than announced and (at the end) if it
 * supplies fewer.
 *
 * NOTE(review): extract is missing lines (loop header, returns, braces,
 * break statements); only comments were added.
 * NOTE(review): `(unsigned)data` truncates the 64-bit user pointer on
 * 64-bit kernels (same defect as amd_cmd_bo_validate).
 * NOTE(review): error-string typos: "unknow", "enought", and the
 * trailing "\n." should be ".\n" — cannot be fixed in a comment-only pass.
 */
93 static int amd_cmd_parse_cmd_bo(struct drm_device *dev,
94 struct drm_file *file,
95 struct drm_amd_cmd *drm_amd_cmd,
98 struct drm_amd_cmd_bo drm_amd_cmd_bo;
99 struct amd_cmd_bo *cmd_bo;
100 uint32_t bo_count = 0;
/* head of the user-space singly linked descriptor list */
101 uint64_t data = drm_amd_cmd->bo;
105 /* check we don't have more buffer than announced */
106 if (bo_count >= drm_amd_cmd->bo_count) {
107 DRM_ERROR("cmd bo count exceeded got %d waited %d\n.",
108 bo_count, drm_amd_cmd->bo_count);
111 /* initialize amd_cmd_bo */
112 cmd_bo = &cmd->bo[bo_count];
113 INIT_LIST_HEAD(&cmd_bo->list);
115 /* copy from userspace */
116 if (copy_from_user(&drm_amd_cmd_bo,
117 (void __user *)((unsigned)data),
118 sizeof(struct drm_amd_cmd_bo))) {
121 /* collect informations */
122 cmd_bo->type = drm_amd_cmd_bo.type;
123 cmd_bo->mask = drm_amd_cmd_bo.op_req.bo_req.mask;
124 cmd_bo->flags = drm_amd_cmd_bo.op_req.bo_req.flags;
125 cmd_bo->handle = drm_amd_cmd_bo.op_req.arg_handle;
/* take a lookup reference; dropped in amd_cmd_bo_cleanup or on error */
127 mutex_lock(&dev->struct_mutex);
128 cmd_bo->bo = drm_lookup_buffer_object(file, cmd_bo->handle, 1);
129 mutex_unlock(&dev->struct_mutex);
130 if (cmd_bo->bo == NULL) {
131 DRM_ERROR("unknown bo handle 0x%x\n", cmd_bo->handle);
134 /* validate buffer if necessary */
135 ret = amd_cmd_bo_validate(dev, file, cmd_bo,
136 &drm_amd_cmd_bo, data);
/* validation failed: drop the lookup reference taken above */
138 mutex_lock(&dev->struct_mutex);
139 drm_bo_usage_deref_locked(&cmd_bo->bo);
140 mutex_unlock(&dev->struct_mutex);
143 /* inspect bo type */
144 switch (cmd_bo->type) {
145 case DRM_AMD_CMD_BO_TYPE_CMD_INDIRECT:
146 /* add it so we properly unreference in case of error */
147 list_add_tail(&cmd_bo->list, &cmd->bo_used.list);
149 case DRM_AMD_CMD_BO_TYPE_DATA:
150 /* add to unused list */
151 list_add_tail(&cmd_bo->list, &cmd->bo_unused.list);
153 case DRM_AMD_CMD_BO_TYPE_CMD_RING:
/* the ring buffer holds the command dwords to be checked/emitted */
155 list_add_tail(&cmd_bo->list, &cmd->bo_used.list);
156 cmd->cdw_bo = cmd_bo;
/* unrecognized type: drop this bo's reference and fail the parse */
159 mutex_lock(&dev->struct_mutex);
160 drm_bo_usage_deref_locked(&cmd_bo->bo);
161 mutex_unlock(&dev->struct_mutex);
162 DRM_ERROR("unknow bo 0x%x unknown type 0x%x in cmd\n",
163 cmd_bo->handle, cmd_bo->type);
/* advance to the next descriptor in the user list */
167 data = drm_amd_cmd_bo.next;
/* userspace announced more buffers than it actually supplied */
170 if (bo_count != drm_amd_cmd->bo_count) {
171 DRM_ERROR("not enought buffer got %d expected %d\n.",
172 bo_count, drm_amd_cmd->bo_count);
/*
 * amd_cmd_packet0_check - verify a PACKET0 (register write) command.
 *
 * Decodes register offset and dword count from the header, then runs the
 * per-register checker table: a NULL entry means "no check needed",
 * (void *)-1 means the register is forbidden, anything else is a checker
 * callback invoked with the dword's position in the stream.
 *
 * On success *cdw_id is advanced past the whole packet:
 * header + (count + 1) payload dwords (PACKET0 count is N-1 encoded,
 * hence the `i <= count` loop and the final `count + 2` advance).
 *
 * NOTE(review): extract is missing lines (braces, returns, continue);
 * only comments were added.
 */
178 static int amd_cmd_packet0_check(struct drm_device *dev,
182 struct drm_radeon_private *dev_priv = dev->dev_private;
183 uint32_t reg, count, r, i;
186 reg = cmd->cdw[*cdw_id] & PACKET0_REG_MASK;
187 count = (cmd->cdw[*cdw_id] & PACKET0_COUNT_MASK) >> PACKET0_COUNT_SHIFT;
/* reject writes that run past the end of the checker table */
188 if (reg + count > dev_priv->cmd_module.numof_p0_checkers) {
189 DRM_ERROR("0x%08X registers is above last accepted registers\n",
193 for (r = reg, i = 0; i <= count; i++, r++) {
/* NULL checker: register is unconditionally allowed */
194 if (dev_priv->cmd_module.check_p0[r] == NULL) {
197 if (dev_priv->cmd_module.check_p0[r] == (void *)-1) {
198 DRM_ERROR("register 0x%08X (at %d) is forbidden\n",
199 r << 2, (*cdw_id) + i + 1);
/* dedicated checker: gets the payload dword's stream position */
202 ret = dev_priv->cmd_module.check_p0[r](dev, cmd,
203 (*cdw_id) + i + 1, r);
208 /* header + N + 1 dword passed test */
209 (*cdw_id) += count + 2;
/*
 * amd_cmd_packet3_check - verify a PACKET3 (opcode) command.
 *
 * Decodes the opcode and dword count from the header and dispatches to
 * the per-opcode checker; a NULL table entry means the opcode is
 * forbidden. On success *cdw_id is advanced past the packet
 * (header + count + 1 dwords, PACKET3 count being N-1 encoded).
 *
 * NOTE(review): `opcode > numof_p3_checkers` looks like an off-by-one —
 * opcode == numof_p3_checkers passes the check yet indexes one past the
 * end of check_p3[]; the comparison should presumably be `>=`. Confirm
 * against the table size convention used for check_p0.
 * NOTE(review): extract is missing lines (returns, braces, trailing
 * checker arguments); only comments were added.
 */
213 static int amd_cmd_packet3_check(struct drm_device *dev,
217 struct drm_radeon_private *dev_priv = dev->dev_private;
218 uint32_t opcode, count;
221 opcode = (cmd->cdw[*cdw_id] & PACKET3_OPCODE_MASK) >>
222 PACKET3_OPCODE_SHIFT;
223 if (opcode > dev_priv->cmd_module.numof_p3_checkers) {
224 DRM_ERROR("0x%08X opcode is above last accepted opcodes\n",
228 count = (cmd->cdw[*cdw_id] & PACKET3_COUNT_MASK) >> PACKET3_COUNT_SHIFT;
229 if (dev_priv->cmd_module.check_p3[opcode] == NULL) {
230 DRM_ERROR("0x%08X opcode is forbidden\n", opcode);
/* checker receives the first payload dword's position in the stream */
233 ret = dev_priv->cmd_module.check_p3[opcode](dev, cmd,
234 (*cdw_id) + 1, opcode,
239 /* header + N + 1 dword passed test */
240 (*cdw_id) += count + 2;
/*
 * amd_cmd_check - validate an entire command stream dword by dword.
 *
 * Iterates over cmd->cdw, dispatching on the packet type of each header:
 * PACKET0 and PACKET3 go to their dedicated checkers (which advance the
 * index past the packet); packet 1 is rejected and packet 2 handling is
 * still a FIXME per the original comments.
 *
 * NOTE(review): extract is missing lines (case labels, error returns,
 * closing braces); only comments were added. Note the loop deliberately
 * has no increment clause — the per-packet checkers advance `i`.
 */
244 int amd_cmd_check(struct drm_device *dev, struct amd_cmd *cmd)
249 for (i = 0; i < cmd->cdw_count;) {
250 switch (PACKET_HEADER_GET(cmd->cdw[i])) {
252 ret = amd_cmd_packet0_check(dev, cmd, &i);
258 /* we don't accept packet 1 */
261 /* FIXME: accept packet 2 */
264 ret = amd_cmd_packet3_check(dev, cmd, &i);
/*
 * amd_ioctl_cmd_cleanup - common exit path for amd_ioctl_cmd(): put back
 * unfenced buffer objects on error, unmap the command buffer, drop the
 * lookup references, free the parse array and release the bm read lock.
 * Returns the error code it was handed so callers can
 * `return amd_ioctl_cmd_cleanup(..., ret);`.
 *
 * BUG(review): the condition below tests `cmd->bo_unused.list` TWICE;
 * the second operand was almost certainly meant to be
 * `!list_empty(&cmd->bo_used.list)` — as written, a failure with only
 * used (validated) buffers would skip drm_putback_buffer_objects().
 * NOTE(review): extract is missing lines (guards around kunmap/free,
 * drm_free() call head, final return); only comments were added.
 */
274 static int amd_ioctl_cmd_cleanup(struct drm_device *dev,
275 struct drm_file *file,
279 /* check if we need to unfence object */
280 if (r && (!list_empty(&cmd->bo_unused.list) ||
281 !list_empty(&cmd->bo_unused.list))) {
282 drm_putback_buffer_objects(dev);
285 drm_bo_kunmap(&cmd->cdw_kmap);
288 /* derefence buffer as lookup reference them */
289 amd_cmd_bo_cleanup(dev, cmd);
292 cmd->bo_count * sizeof(struct amd_cmd_bo),
/* pairs with the drm_bo_read_lock() taken in amd_ioctl_cmd() */
296 drm_bo_read_unlock(&dev->bm.bm_lock);
/*
 * amd_ioctl_cmd - command submission ioctl entry point.
 *
 * Pipeline: sanity-check the checker module and dword count, take the
 * buffer-manager read lock, parse/validate the submitted buffer objects
 * (amd_cmd_parse_cmd_bo), kmap the command buffer, run the command
 * checker, emit the dwords to the ring, fence the buffers, and report
 * the fence back to userspace. Every exit funnels through
 * amd_ioctl_cmd_cleanup(), which returns the final status.
 *
 * NOTE(review): the comment "command dword count must be >= 0" disagrees
 * with the code, which rejects only count == 0 — the comment should read
 * "> 0" (cdw_count is unsigned, so >= 0 is vacuous).
 * NOTE(review): extract is missing lines (error returns, braces, some
 * argument lines); only comments were added.
 */
300 int amd_ioctl_cmd(struct drm_device *dev, void *data, struct drm_file *file)
302 struct drm_radeon_private *dev_priv = dev->dev_private;
303 struct drm_amd_cmd *drm_amd_cmd = data;
304 struct drm_fence_arg *fence_arg = &drm_amd_cmd->fence_arg;
305 struct drm_fence_object *fence;
310 /* check that we have a command checker */
311 if (dev_priv->cmd_module.check == NULL) {
312 DRM_ERROR("invalid command checker module.\n");
315 /* command dword count must be >= 0 */
316 if (drm_amd_cmd->cdw_count == 0) {
317 DRM_ERROR("command dword count is 0.\n");
321 /* FIXME: Lock buffer manager. This is needed so the X server can
322 * block DRI clients while VT switched. The X server will then
323 * take the lock in write mode
/* interruptible read lock; released by amd_ioctl_cmd_cleanup() */
326 ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
329 /* FIXME: ret can be -EAGAIN here,
330 * which really isn't an error.
333 DRM_ERROR("bo read locking failed.\n");
336 /* cleanup & initialize amd cmd structure */
337 memset(&cmd, 0, sizeof(struct amd_cmd));
338 cmd.bo_count = drm_amd_cmd->bo_count;
339 INIT_LIST_HEAD(&cmd.bo_unused.list);
340 INIT_LIST_HEAD(&cmd.bo_used.list);
341 /* allocate structure for bo parsing */
342 cmd.bo = drm_calloc(cmd.bo_count, sizeof(struct amd_cmd_bo),
344 if (cmd.bo == NULL) {
345 return amd_ioctl_cmd_cleanup(dev, file, &cmd, -ENOMEM);
/* parse, look up and validate all submitted buffer objects */
348 ret = amd_cmd_parse_cmd_bo(dev, file, drm_amd_cmd, &cmd);
350 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
352 /* check that a command buffer have been found */
353 if (cmd.cdw_bo == NULL) {
354 DRM_ERROR("no command buffer submited in cmd ioctl\n");
355 return amd_ioctl_cmd_cleanup(dev, file, &cmd, -EINVAL);
357 /* map command buffer */
358 cmd.cdw_count = drm_amd_cmd->cdw_count;
/* bo size in dwords: pages * PAGE_SIZE bytes, >> 2 for 4-byte dwords */
359 cmd.cdw_size = (cmd.cdw_bo->bo->mem.num_pages * PAGE_SIZE) >> 2;
360 if (cmd.cdw_size < cmd.cdw_count) {
361 DRM_ERROR("command buffer (%d) is smaller than expected (%d)\n",
362 cmd.cdw_size, cmd.cdw_count);
363 return amd_ioctl_cmd_cleanup(dev, file, &cmd, -EINVAL);
365 memset(&cmd.cdw_kmap, 0, sizeof(struct drm_bo_kmap_obj));
366 ret = drm_bo_kmap(cmd.cdw_bo->bo, 0,
367 cmd.cdw_bo->bo->mem.num_pages, &cmd.cdw_kmap);
369 DRM_ERROR("error mapping command buffer\n");
370 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
372 cmd.cdw = drm_bmo_virtual(&cmd.cdw_kmap, &tmp);
373 /* do command checking */
374 ret = dev_priv->cmd_module.check(dev, &cmd);
376 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
378 /* copy command to ring */
379 ret = radeon_ms_ring_emit(dev, cmd.cdw, cmd.cdw_count);
381 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
/* fence all buffer objects referenced by this submission */
384 ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
386 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
/* optionally expose the fence to userspace via a user object */
388 if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) {
389 ret = drm_fence_add_user_object(file, fence,
391 DRM_FENCE_FLAG_SHAREABLE);
393 fence_arg->handle = fence->base.hash.key;
394 fence_arg->fence_class = fence->fence_class;
395 fence_arg->type = fence->type;
396 fence_arg->signaled = fence->signaled_types;
397 fence_arg->sequence = fence->sequence;
399 DRM_ERROR("error add object fence, expect oddity !\n");
/* drop our local fence reference; user object (if any) keeps its own */
402 drm_fence_usage_deref_unlocked(&fence);
403 return amd_ioctl_cmd_cleanup(dev, file, &cmd, 0);