/* shared-core/radeon_ms_exec.c — libdrm (upstream snapshot, "LUT updates") */
/*
 * Copyright 2007 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include "radeon_ms.h"
#include "amd.h"

30 static inline void amd_cmd_bo_cleanup(struct drm_device *dev,
31                                       struct amd_cmd *cmd)
32 {
33         struct amd_cmd_bo *bo;
34
35         mutex_lock(&dev->struct_mutex);
36         list_for_each_entry(bo, &cmd->bo_unused.list, list) {
37                 drm_bo_usage_deref_locked(&bo->bo);
38         }
39         list_for_each_entry(bo, &cmd->bo_used.list, list) {
40                 drm_bo_usage_deref_locked(&bo->bo);
41         }
42         mutex_unlock(&dev->struct_mutex);
43 }
44
45 static inline int amd_cmd_bo_validate(struct drm_device *dev,
46                                       struct drm_file *file,
47                                       struct amd_cmd_bo *cmd_bo,
48                                       struct drm_amd_cmd_bo *bo,
49                                       uint64_t data)
50 {
51         int ret;
52
53         /* validate only cmd indirect or data bo */
54         switch (bo->type) {
55         case DRM_AMD_CMD_BO_TYPE_CMD_INDIRECT:
56         case DRM_AMD_CMD_BO_TYPE_DATA:
57         case DRM_AMD_CMD_BO_TYPE_CMD_RING:
58                 /* FIXME: make sure userspace can no longer map the bo */
59                 break;
60         default:
61                 return 0;
62         }
63         /* check that buffer operation is validate */
64         if (bo->op_req.op != drm_bo_validate) {
65                 DRM_ERROR("buffer 0x%x object operation is not validate.\n",
66                           cmd_bo->handle);
67                 return -EINVAL;
68         }
69         /* validate buffer */
70         memset(&bo->op_rep, 0, sizeof(struct drm_bo_arg_rep));
71         ret = drm_bo_handle_validate(file,
72                                      bo->op_req.bo_req.handle,
73                                      bo->op_req.bo_req.flags,
74                                      bo->op_req.bo_req.mask,
75                                      bo->op_req.bo_req.hint,
76                                      bo->op_req.bo_req.fence_class,
77                                      &bo->op_rep.bo_info,
78                                      &cmd_bo->bo);
79         if (ret) {
80                 DRM_ERROR("validate error %d for 0x%08x\n",
81                           ret, cmd_bo->handle);
82                 return ret;
83         }
84         if (copy_to_user((void __user *)((unsigned)data), bo,
85                          sizeof(struct drm_amd_cmd_bo))) {
86                 DRM_ERROR("failed to copy to user validate result of 0x%08x\n",
87                           cmd_bo->handle);
88                 return -EFAULT;
89         }
90         return 0;
91 }
92
93 static int amd_cmd_parse_cmd_bo(struct drm_device *dev,
94                                 struct drm_file *file,
95                                 struct drm_amd_cmd *drm_amd_cmd,
96                                 struct amd_cmd *cmd)
97 {
98         struct drm_amd_cmd_bo drm_amd_cmd_bo;
99         struct amd_cmd_bo *cmd_bo;
100         uint32_t bo_count = 0;
101         uint64_t data = drm_amd_cmd->bo;
102         int ret = 0;
103
104         do {
105                 /* check we don't have more buffer than announced */
106                 if (bo_count >= drm_amd_cmd->bo_count) {
107                         DRM_ERROR("cmd bo count exceeded got %d waited %d\n.",
108                                   bo_count, drm_amd_cmd->bo_count);
109                         return -EINVAL;
110                 }
111                 /* initialize amd_cmd_bo */
112                 cmd_bo = &cmd->bo[bo_count];
113                 INIT_LIST_HEAD(&cmd_bo->list);
114                 cmd_bo->bo = NULL;
115                 /* copy from userspace */
116                 if (copy_from_user(&drm_amd_cmd_bo,
117                                    (void __user *)((unsigned)data),
118                                    sizeof(struct drm_amd_cmd_bo))) {
119                         return -EFAULT;
120                 }
121                 /* collect informations */
122                 cmd_bo->type = drm_amd_cmd_bo.type;
123                 cmd_bo->mask = drm_amd_cmd_bo.op_req.bo_req.mask;
124                 cmd_bo->flags = drm_amd_cmd_bo.op_req.bo_req.flags;
125                 cmd_bo->handle = drm_amd_cmd_bo.op_req.arg_handle;
126                 /* get bo objects */
127                 mutex_lock(&dev->struct_mutex);
128                 cmd_bo->bo = drm_lookup_buffer_object(file, cmd_bo->handle, 1);
129                 mutex_unlock(&dev->struct_mutex);
130                 if (cmd_bo->bo == NULL) {
131                         DRM_ERROR("unknown bo handle 0x%x\n", cmd_bo->handle);
132                         return -EINVAL;
133                 }
134                 /* validate buffer if necessary */
135                 ret = amd_cmd_bo_validate(dev, file, cmd_bo,
136                                           &drm_amd_cmd_bo, data);
137                 if (ret) {
138                         mutex_lock(&dev->struct_mutex);
139                         drm_bo_usage_deref_locked(&cmd_bo->bo);
140                         mutex_unlock(&dev->struct_mutex);
141                         return ret;
142                 }
143                 /* inspect bo type */
144                 switch (cmd_bo->type) {
145                 case DRM_AMD_CMD_BO_TYPE_CMD_INDIRECT:
146                         /* add it so we properly unreference in case of error */
147                         list_add_tail(&cmd_bo->list, &cmd->bo_used.list);
148                         return -EINVAL;
149                 case DRM_AMD_CMD_BO_TYPE_DATA:
150                         /* add to unused list */
151                         list_add_tail(&cmd_bo->list, &cmd->bo_unused.list);
152                         break;
153                 case DRM_AMD_CMD_BO_TYPE_CMD_RING:
154                         /* set cdw_bo */
155                         list_add_tail(&cmd_bo->list, &cmd->bo_used.list);
156                         cmd->cdw_bo = cmd_bo;
157                         break;
158                 default:
159                         mutex_lock(&dev->struct_mutex);
160                         drm_bo_usage_deref_locked(&cmd_bo->bo);
161                         mutex_unlock(&dev->struct_mutex);
162                         DRM_ERROR("unknow bo 0x%x unknown type 0x%x in cmd\n",
163                                   cmd_bo->handle, cmd_bo->type);
164                         return -EINVAL;
165                 }
166                 /* ok next bo */
167                 data = drm_amd_cmd_bo.next;
168                 bo_count++;
169         } while (data != 0);
170         if (bo_count != drm_amd_cmd->bo_count) {
171                 DRM_ERROR("not enought buffer got %d expected %d\n.",
172                           bo_count, drm_amd_cmd->bo_count);
173                 return -EINVAL;
174         }
175         return 0;
176 }
177
178 static int amd_cmd_packet0_check(struct drm_device *dev,
179                                  struct amd_cmd *cmd,
180                                  int *cdw_id)
181 {
182         struct drm_radeon_private *dev_priv = dev->dev_private;
183         uint32_t reg, count, r, i;
184         int ret;
185
186         reg = cmd->cdw[*cdw_id] & PACKET0_REG_MASK;
187         count = (cmd->cdw[*cdw_id] & PACKET0_COUNT_MASK) >> PACKET0_COUNT_SHIFT;
188         if (reg + count > dev_priv->cmd_module.numof_p0_checkers) {
189                 DRM_ERROR("0x%08X registers is above last accepted registers\n",
190                           reg << 2);
191                 return -EINVAL;
192         }
193         for (r = reg, i = 0; i <= count; i++, r++) {
194                 if (dev_priv->cmd_module.check_p0[r] == NULL) {
195                         continue;
196                 }
197                 if (dev_priv->cmd_module.check_p0[r] == (void *)-1) {
198                         DRM_ERROR("register 0x%08X (at %d) is forbidden\n",
199                                  r << 2, (*cdw_id) + i + 1);
200                         return -EINVAL;
201                 }
202                 ret = dev_priv->cmd_module.check_p0[r](dev, cmd,
203                                                        (*cdw_id) + i + 1, r);
204                 if (ret) {
205                         return ret;
206                 }
207         }
208         /* header + N + 1 dword passed test */
209         (*cdw_id) += count + 2;
210         return 0;
211 }
212
213 static int amd_cmd_packet3_check(struct drm_device *dev,
214                                  struct amd_cmd *cmd,
215                                  int *cdw_id)
216 {
217         struct drm_radeon_private *dev_priv = dev->dev_private;
218         uint32_t opcode, count;
219         int ret;
220
221         opcode = (cmd->cdw[*cdw_id] & PACKET3_OPCODE_MASK) >>
222                  PACKET3_OPCODE_SHIFT;
223         if (opcode > dev_priv->cmd_module.numof_p3_checkers) {
224                 DRM_ERROR("0x%08X opcode is above last accepted opcodes\n",
225                           opcode);
226                 return -EINVAL;
227         }
228         count = (cmd->cdw[*cdw_id] & PACKET3_COUNT_MASK) >> PACKET3_COUNT_SHIFT;
229         if (dev_priv->cmd_module.check_p3[opcode] == NULL) {
230                 DRM_ERROR("0x%08X opcode is forbidden\n", opcode);
231                 return -EINVAL;
232         }
233         ret = dev_priv->cmd_module.check_p3[opcode](dev, cmd,
234                                                     (*cdw_id) + 1, opcode,
235                                                     count);
236         if (ret) {
237                 return ret;
238         }
239         /* header + N + 1 dword passed test */
240         (*cdw_id) += count + 2;
241         return 0;
242 }
243
244 int amd_cmd_check(struct drm_device *dev, struct amd_cmd *cmd)
245 {
246         uint32_t i;
247         int ret;
248
249         for (i = 0; i < cmd->cdw_count;) {
250                 switch (PACKET_HEADER_GET(cmd->cdw[i])) {
251                 case 0:
252                         ret = amd_cmd_packet0_check(dev, cmd, &i);
253                         if (ret) {
254                                 return ret;
255                         }
256                         break;
257                 case 1:
258                         /* we don't accept packet 1 */
259                         return -EINVAL;
260                 case 2:
261                         /* FIXME: accept packet 2 */
262                         return -EINVAL;
263                 case 3:
264                         ret = amd_cmd_packet3_check(dev, cmd, &i);
265                         if (ret) {
266                                 return ret;
267                         }
268                         break;
269                 }
270         }
271         return 0;
272 }
273
274 static int amd_ioctl_cmd_cleanup(struct drm_device *dev,
275                                  struct drm_file *file,
276                                  struct amd_cmd *cmd,
277                                  int r)
278 {
279         /* check if we need to unfence object */
280         if (r && (!list_empty(&cmd->bo_unused.list) ||
281                   !list_empty(&cmd->bo_unused.list))) {
282                 drm_putback_buffer_objects(dev);                
283         }
284         if (cmd->cdw) {
285                 drm_bo_kunmap(&cmd->cdw_kmap);
286                 cmd->cdw = NULL;
287         }
288         /* derefence buffer as lookup reference them */
289         amd_cmd_bo_cleanup(dev, cmd);
290         if (cmd->bo) {
291                 drm_free(cmd->bo,
292                          cmd->bo_count * sizeof(struct amd_cmd_bo),
293                          DRM_MEM_DRIVER);
294                 cmd->bo = NULL;
295         }
296         drm_bo_read_unlock(&dev->bm.bm_lock);
297         return r;
298 }
299
300 int amd_ioctl_cmd(struct drm_device *dev, void *data, struct drm_file *file)
301 {
302         struct drm_radeon_private *dev_priv = dev->dev_private;
303         struct drm_amd_cmd *drm_amd_cmd = data;
304         struct drm_fence_arg *fence_arg = &drm_amd_cmd->fence_arg;
305         struct drm_fence_object *fence;
306         struct amd_cmd cmd;
307         int tmp;
308         int ret;
309
310         /* check that we have a command checker */
311         if (dev_priv->cmd_module.check == NULL) {
312                 DRM_ERROR("invalid command checker module.\n");
313                 return -EFAULT;
314         }
315         /* command dword count must be >= 0 */
316         if (drm_amd_cmd->cdw_count == 0) {
317                 DRM_ERROR("command dword count is 0.\n");
318                 return -EINVAL;
319         }
320
321         /* FIXME: Lock buffer manager. This is needed so the X server can
322          * block DRI clients while VT switched. The X server will then 
323          * take the lock in write mode
324          */
325
326         ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
327         if (ret) {
328
329                 /* FIXME: ret can be -EAGAIN here, 
330                  * which really isn't an error. 
331                  */
332
333                 DRM_ERROR("bo read locking failed.\n");
334                 return ret;
335         }
336         /* cleanup & initialize amd cmd structure */
337         memset(&cmd, 0, sizeof(struct amd_cmd));
338         cmd.bo_count = drm_amd_cmd->bo_count;
339         INIT_LIST_HEAD(&cmd.bo_unused.list);
340         INIT_LIST_HEAD(&cmd.bo_used.list);
341         /* allocate structure for bo parsing */
342         cmd.bo = drm_calloc(cmd.bo_count, sizeof(struct amd_cmd_bo),
343                             DRM_MEM_DRIVER);
344         if (cmd.bo == NULL) {
345                 return amd_ioctl_cmd_cleanup(dev, file, &cmd, -ENOMEM);
346         }
347         /* parse cmd bo */
348         ret = amd_cmd_parse_cmd_bo(dev, file, drm_amd_cmd, &cmd);
349         if (ret) {
350                 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
351         }
352         /* check that a command buffer have been found */
353         if (cmd.cdw_bo == NULL) {
354                 DRM_ERROR("no command buffer submited in cmd ioctl\n");
355                 return amd_ioctl_cmd_cleanup(dev, file, &cmd, -EINVAL);
356         }
357         /* map command buffer */
358         cmd.cdw_count = drm_amd_cmd->cdw_count;
359         cmd.cdw_size = (cmd.cdw_bo->bo->mem.num_pages * PAGE_SIZE) >> 2;
360         if (cmd.cdw_size < cmd.cdw_count) {
361                 DRM_ERROR("command buffer (%d) is smaller than expected (%d)\n",
362                           cmd.cdw_size, cmd.cdw_count);
363                 return amd_ioctl_cmd_cleanup(dev, file, &cmd, -EINVAL);
364         }
365         memset(&cmd.cdw_kmap, 0, sizeof(struct drm_bo_kmap_obj));
366         ret = drm_bo_kmap(cmd.cdw_bo->bo, 0,
367                           cmd.cdw_bo->bo->mem.num_pages, &cmd.cdw_kmap);
368         if (ret) {
369                 DRM_ERROR("error mapping command buffer\n");
370                 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
371         }
372         cmd.cdw = drm_bmo_virtual(&cmd.cdw_kmap, &tmp);
373         /* do command checking */
374         ret = dev_priv->cmd_module.check(dev, &cmd);
375         if (ret) {
376                 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
377         }
378         /* copy command to ring */
379         ret = radeon_ms_ring_emit(dev, cmd.cdw, cmd.cdw_count);
380         if (ret) {
381                 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
382         }
383         /* fence */
384         ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
385         if (ret) {
386                 return amd_ioctl_cmd_cleanup(dev, file, &cmd, ret);
387         }
388         if (!(fence_arg->flags & DRM_FENCE_FLAG_NO_USER)) {
389                 ret = drm_fence_add_user_object(file, fence,
390                                                 fence_arg->flags &
391                                                 DRM_FENCE_FLAG_SHAREABLE);
392                 if (!ret) {
393                         fence_arg->handle = fence->base.hash.key;
394                         fence_arg->fence_class = fence->fence_class;
395                         fence_arg->type = fence->type;
396                         fence_arg->signaled = fence->signaled_types;
397                         fence_arg->sequence = fence->sequence;
398                 } else {
399                         DRM_ERROR("error add object fence, expect oddity !\n");
400                 }
401         }
402         drm_fence_usage_deref_unlocked(&fence);
403         return amd_ioctl_cmd_cleanup(dev, file, &cmd, 0);
404 }