/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
/*
 * Cmdstream submission:
 */
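/*
 * For orientation, a rough sketch of what userspace hands this ioctl
 * (illustrative only; the authoritative uapi layout lives in
 * include/uapi/drm/msm_drm.h, and error handling is omitted):
 *
 *	struct drm_msm_gem_submit_bo bos[] = {
 *		{ .handle = bo_handle,                  // GEM handle
 *		  .flags  = MSM_SUBMIT_BO_READ,         // and/or _WRITE
 *		  .presumed = last_known_iova },        // hint, may be stale
 *	};
 *	struct drm_msm_gem_submit_cmd cmds[] = {
 *		{ .type = MSM_SUBMIT_CMD_BUF,
 *		  .submit_idx = 0,                      // index into bos[]
 *		  .submit_offset = 0,
 *		  .size = cmdstream_bytes,
 *		  .nr_relocs = nr_relocs,
 *		  .relocs = (uint64_t)(uintptr_t)relocs },
 *	};
 *	struct drm_msm_gem_submit req = {
 *		.pipe    = MSM_PIPE_3D0,
 *		.nr_bos  = ARRAY_SIZE(bos),  .bos  = (uint64_t)(uintptr_t)bos,
 *		.nr_cmds = ARRAY_SIZE(cmds), .cmds = (uint64_t)(uintptr_t)cmds,
 *	};
 *	ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);   // req.fence returned on success
 */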
/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
#define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000
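/*
 * These bits live in the high bits of submit->bos[i].flags, alongside the
 * MSM_SUBMIT_BO_READ/WRITE flags copied from userspace, and track per-submit
 * state: BO_LOCKED means we hold the bo's reservation ww_mutex, BO_PINNED
 * means we hold an iova reference, and BO_VALID means the userspace-presumed
 * iova matched, so the cmdstream needs no reloc patching for that bo.
 */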
static struct msm_gem_submit *submit_create(struct drm_device *dev,
		struct msm_gpu *gpu, int nr_bos, int nr_cmds)
{
	struct msm_gem_submit *submit;
	int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
			(nr_cmds * sizeof(*submit->cmd));

	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (!submit)
		return NULL;

	submit->dev = dev;
	submit->gpu = gpu;
	submit->fence = NULL;
	submit->pid = get_pid(task_pid(current));
	submit->cmd = (void *)&submit->bos[nr_bos];

	/* initially, until copy_from_user() and bo lookup succeeds: */
	submit->nr_bos = 0;
	submit->nr_cmds = 0;

	INIT_LIST_HEAD(&submit->node);
	INIT_LIST_HEAD(&submit->bo_list);
	ww_acquire_init(&submit->ticket, &reservation_ww_class);

	return submit;
}
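/*
 * Free a submit: drops the fence and pid references and releases the single
 * allocation that also holds the bos[] and cmd[] arrays set up in
 * submit_create().
 */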
void msm_gem_submit_free(struct msm_gem_submit *submit)
{
	fence_put(submit->fence);
	list_del(&submit->node);
	put_pid(submit->pid);
	kfree(submit);
}
static inline unsigned long __must_check
copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		return __copy_from_user_inatomic(to, from, n);
	return -EFAULT;
}
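/*
 * Copy the bo table from userspace and resolve GEM handles to objects.  The
 * whole loop runs under a single table_lock (hence the inatomic copy with a
 * drop-the-lock fallback) rather than taking the lock per handle via
 * drm_gem_object_lookup().
 */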
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);
	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;
		ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
		if (unlikely(ret)) {
			spin_unlock(&file->table_lock);
			ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
			if (ret) {
				ret = -EFAULT;
				goto out;
			}
			spin_lock(&file->table_lock);
		}
		if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = submit_bo.flags;
		/* in submit_pin_objects() we figure out if this is true: */
		submit->bos[i].iova = submit_bo.presumed;
		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit_bo.handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit_bo.handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}
		drm_gem_object_reference(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	submit->nr_bos = i;

	return ret;
}
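/* Unwind the per-bo state (iova pin, reservation lock) set up so far: */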
static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
{
	struct msm_gem_object *msm_obj = submit->bos[i].obj;

	if (submit->bos[i].flags & BO_PINNED)
		msm_gem_put_iova(&msm_obj->base, submit->gpu->id);

	if (submit->bos[i].flags & BO_LOCKED)
		ww_mutex_unlock(&msm_obj->resv->lock);

	if (!(submit->bos[i].flags & BO_VALID))
		submit->bos[i].iova = 0;

	submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
}
/* This is where we make sure all the bo's are reserved (locked): */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i);

	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
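/*
 * Resolve implicit synchronization for each bo: roughly, sync against fences
 * already attached to the bo's reservation object by other contexts, with
 * MSM_SUBMIT_BO_WRITE buffers also needing to wait for outstanding readers.
 */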
static int submit_fence_sync(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;

		ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write);
		if (ret)
			break;
	}

	return ret;
}
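/*
 * Pin each bo to get its GPU iova.  If the iova still matches the address
 * userspace presumed, the bo is marked BO_VALID; otherwise submit->valid is
 * cleared so the relocs get applied to patch the cmdstream.
 */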
static int submit_pin_objects(struct msm_gem_submit *submit)
{
	int i, ret = 0;

	submit->valid = true;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint32_t iova;

		/* if locking succeeded, pin bo: */
		ret = msm_gem_get_iova_locked(&msm_obj->base,
				submit->gpu->id, &iova);

		if (ret)
			break;

		submit->bos[i].flags |= BO_PINNED;

		if (iova == submit->bos[i].iova) {
			submit->bos[i].flags |= BO_VALID;
		} else {
			submit->bos[i].iova = iova;
			/* iova changed, so address in cmdstream is not valid: */
			submit->bos[i].flags &= ~BO_VALID;
			submit->valid = false;
		}
	}

	return ret;
}
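/* Bounds-checked accessor for the submit's bo table; any out-pointer may be NULL: */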
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
				idx, submit->nr_bos);
		return -EINVAL;
	}

	if (obj)
		*obj = submit->bos[idx].obj;
	if (iova)
		*iova = submit->bos[idx].iova;
	if (valid)
		*valid = !!(submit->bos[idx].flags & BO_VALID);

	return 0;
}
/* process the reloc's and patch up the cmdstream as needed: */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably want
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr_locked(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}
	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t iova, off;
		bool valid;

		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}
		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr_locked(&obj->base);

	return ret;
}
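/*
 * Drop everything submit_lookup_objects()/submit_lock_objects()/
 * submit_pin_objects() set up; called on both the success and error paths.
 */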
static void submit_cleanup(struct msm_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		submit_unlock_unpin_bo(submit, i);
		list_del_init(&msm_obj->submit_entry);
		drm_gem_object_unreference(&msm_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
}
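/*
 * The submit ioctl itself: look up the bos, lock and fence-sync them, pin
 * them for the GPU, validate each cmd buffer and patch relocs if needed,
 * then hand the whole thing to msm_gpu_submit() under struct_mutex.
 */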
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_gem_submit *args = data;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_gem_submit *submit;
	struct msm_gpu *gpu = priv->gpu;
	unsigned i;
	int ret;

	if (!gpu)
		return -ENXIO;
	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	priv->struct_mutex_task = current;

	submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
	if (!submit) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	ret = submit_lookup_objects(submit, args, file);
	if (ret)
		goto out;

	ret = submit_lock_objects(submit);
	if (ret)
		goto out;

	ret = submit_fence_sync(submit);
	if (ret)
		goto out;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;
	for (i = 0; i < args->nr_cmds; i++) {
		struct drm_msm_gem_submit_cmd submit_cmd;
		void __user *userptr =
			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
		struct msm_gem_object *msm_obj;
		uint32_t iova;

		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}
		/* validate input from userspace: */
		switch (submit_cmd.type) {
		case MSM_SUBMIT_CMD_BUF:
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			break;
		default:
			DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
			ret = -EINVAL;
			goto out;
		}
		ret = submit_bo(submit, submit_cmd.submit_idx,
				&msm_obj, &iova, NULL);
		if (ret)
			goto out;

		if (submit_cmd.size % 4) {
			DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
					submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}

		if ((submit_cmd.size + submit_cmd.submit_offset) >=
				msm_obj->base.size) {
			DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
			ret = -EINVAL;
			goto out;
		}
		submit->cmd[i].type = submit_cmd.type;
		submit->cmd[i].size = submit_cmd.size / 4;
		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
		submit->cmd[i].idx = submit_cmd.submit_idx;

		if (submit->valid)
			continue;

		ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
				submit_cmd.nr_relocs, submit_cmd.relocs);
		if (ret)
			goto out;
	}

	submit->nr_cmds = i;
	ret = msm_gpu_submit(gpu, submit, ctx);

	args->fence = submit->fence->seqno;

out:
	submit_cleanup(submit);
	if (ret)
		msm_gem_submit_free(submit);
out_unlock:
	priv->struct_mutex_task = NULL;
	mutex_unlock(&dev->struct_mutex);

	return ret;
}