&args->handle);
}
-static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
-		struct drm_file *file_priv)
-{
-	struct drm_omap_gem_cpu_prep *args = data;
-	struct drm_gem_object *obj;
-	int ret;
-
-	VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
-
-	obj = drm_gem_object_lookup(file_priv, args->handle);
-	if (!obj)
-		return -ENOENT;
-
-	ret = omap_gem_op_sync(obj, args->op);
-
-	if (!ret)
-		ret = omap_gem_op_start(obj, args->op);
-
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
-static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
-		struct drm_file *file_priv)
-{
-	struct drm_omap_gem_cpu_fini *args = data;
-	struct drm_gem_object *obj;
-	int ret;
-
-	VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
-
-	obj = drm_gem_object_lookup(file_priv, args->handle);
-	if (!obj)
-		return -ENOENT;
-
-	/* XXX flushy, flushy */
-	ret = 0;
-
-	if (!ret)
-		ret = omap_gem_op_finish(obj, args->op);
-
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
-}
-
static int ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
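For reference, the two handlers deleted above serviced a small piece of uapi: userspace passed a GEM handle plus an OMAP_GEM_READ/OMAP_GEM_WRITE mask to wait for pending DMA before CPU access (CPU_PREP) and to signal the end of that access (CPU_FINI). A hedged sketch of such a caller is shown below; the ioctl macro, the header path and the helper name are assumed from the omap_drm uapi header rather than quoted from this patch.

/* Hypothetical userspace caller of the deprecated ioctl. The names
 * DRM_IOCTL_OMAP_GEM_CPU_PREP and struct drm_omap_gem_cpu_prep are
 * assumed from drm/omap_drm.h; wait_for_cpu_read() is illustrative.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/omap_drm.h>

static int wait_for_cpu_read(int drm_fd, uint32_t handle)
{
	struct drm_omap_gem_cpu_prep req = {
		.handle = handle,	/* GEM handle of the buffer object */
		.op = OMAP_GEM_READ,	/* CPU wants to read the buffer */
	};

	return ioctl(drm_fd, DRM_IOCTL_OMAP_GEM_CPU_PREP, &req);
}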
			  DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new,
			  DRM_AUTH | DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep,
+	/* Deprecated, to be removed. */
+	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, drm_noop,
			  DRM_AUTH | DRM_RENDER_ALLOW),
-	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini,
+	/* Deprecated, to be removed. */
+	DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, drm_noop,
			  DRM_AUTH | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info,
			  DRM_AUTH | DRM_RENDER_ALLOW),
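drm_noop is the DRM core's stub ioctl handler; wiring the deprecated OMAP_GEM_CPU_PREP and OMAP_GEM_CPU_FINI entries to it keeps the ioctl numbers valid for existing userspace while dropping the behaviour. A minimal sketch of what such a stub amounts to, assuming only that the arguments are accepted and success is reported (illustration, not the core helper's actual body):

/* Sketch of a drm_noop-style stub: the ioctl is still accepted and
 * returns 0, but nothing is looked up, synced or flushed any more. */
static int stub_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	return 0;
}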
	 * Virtual address, if mapped.
	 */
	void *vaddr;
-
-	/**
-	 * sync-object allocated on demand (if needed)
-	 *
-	 * Per-buffer sync-object for tracking pending and completed hw/dma
-	 * read and write operations.
-	 */
-	struct {
-		uint32_t write_pending;
-		uint32_t write_complete;
-		uint32_t read_pending;
-		uint32_t read_complete;
-	} *sync;
};
#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
#endif
/* -----------------------------------------------------------------------------
- * Buffer Synchronization
- */
-
-static DEFINE_SPINLOCK(sync_lock);
-
-struct omap_gem_sync_waiter {
-	struct list_head list;
-	struct omap_gem_object *omap_obj;
-	enum omap_gem_op op;
-	uint32_t read_target, write_target;
-	/* notify called w/ sync_lock held */
-	void (*notify)(void *arg);
-	void *arg;
-};
-
-/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
- * the read and/or write target count is achieved which can call a user
- * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
- * cpu access), etc.
- */
-static LIST_HEAD(waiters);
-
-static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
-{
-	struct omap_gem_object *omap_obj = waiter->omap_obj;
-	if ((waiter->op & OMAP_GEM_READ) &&
-			(omap_obj->sync->write_complete < waiter->write_target))
-		return true;
-	if ((waiter->op & OMAP_GEM_WRITE) &&
-			(omap_obj->sync->read_complete < waiter->read_target))
-		return true;
-	return false;
-}
-
-/* macro for sync debug.. */
-#define SYNCDBG 0
-#define SYNC(fmt, ...) do { if (SYNCDBG) \
-		pr_err("%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__); \
-	} while (0)
-
-
-static void sync_op_update(void)
-{
-	struct omap_gem_sync_waiter *waiter, *n;
-	list_for_each_entry_safe(waiter, n, &waiters, list) {
-		if (!is_waiting(waiter)) {
-			list_del(&waiter->list);
-			SYNC("notify: %p", waiter);
-			waiter->notify(waiter->arg);
-			kfree(waiter);
-		}
-	}
-}
-
-static inline int sync_op(struct drm_gem_object *obj,
-		enum omap_gem_op op, bool start)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret = 0;
-
-	spin_lock(&sync_lock);
-
-	if (!omap_obj->sync) {
-		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
-		if (!omap_obj->sync) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-	}
-
-	if (start) {
-		if (op & OMAP_GEM_READ)
-			omap_obj->sync->read_pending++;
-		if (op & OMAP_GEM_WRITE)
-			omap_obj->sync->write_pending++;
-	} else {
-		if (op & OMAP_GEM_READ)
-			omap_obj->sync->read_complete++;
-		if (op & OMAP_GEM_WRITE)
-			omap_obj->sync->write_complete++;
-		sync_op_update();
-	}
-
-unlock:
-	spin_unlock(&sync_lock);
-
-	return ret;
-}
-
-/* mark the start of read and/or write operation */
-int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	return sync_op(obj, op, true);
-}
-
-int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	return sync_op(obj, op, false);
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(sync_event);
-
-static void sync_notify(void *arg)
-{
-	struct task_struct **waiter_task = arg;
-	*waiter_task = NULL;
-	wake_up_all(&sync_event);
-}
-
-int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	int ret = 0;
-	if (omap_obj->sync) {
-		struct task_struct *waiter_task = current;
-		struct omap_gem_sync_waiter *waiter =
-				kzalloc(sizeof(*waiter), GFP_KERNEL);
-
-		if (!waiter)
-			return -ENOMEM;
-
-		waiter->omap_obj = omap_obj;
-		waiter->op = op;
-		waiter->read_target = omap_obj->sync->read_pending;
-		waiter->write_target = omap_obj->sync->write_pending;
-		waiter->notify = sync_notify;
-		waiter->arg = &waiter_task;
-
-		spin_lock(&sync_lock);
-		if (is_waiting(waiter)) {
-			SYNC("waited: %p", waiter);
-			list_add_tail(&waiter->list, &waiters);
-			spin_unlock(&sync_lock);
-			ret = wait_event_interruptible(sync_event,
-					(waiter_task == NULL));
-			spin_lock(&sync_lock);
-			if (waiter_task) {
-				SYNC("interrupted: %p", waiter);
-				/* we were interrupted */
-				list_del(&waiter->list);
-				waiter_task = NULL;
-			} else {
-				/* freed in sync_op_update() */
-				waiter = NULL;
-			}
-		}
-		spin_unlock(&sync_lock);
-		kfree(waiter);
-	}
-	return ret;
-}
-
-/* call fxn(arg), either synchronously or asynchronously if the op
- * is currently blocked.. fxn() can be called from any context
- *
- * (TODO for now fxn is called back from whichever context calls
- * omap_gem_op_finish().. but this could be better defined later
- * if needed)
- *
- * TODO more code in common w/ _sync()..
- */
-int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
-		void (*fxn)(void *arg), void *arg)
-{
-	struct omap_gem_object *omap_obj = to_omap_bo(obj);
-	if (omap_obj->sync) {
-		struct omap_gem_sync_waiter *waiter =
-				kzalloc(sizeof(*waiter), GFP_ATOMIC);
-
-		if (!waiter)
-			return -ENOMEM;
-
-		waiter->omap_obj = omap_obj;
-		waiter->op = op;
-		waiter->read_target = omap_obj->sync->read_pending;
-		waiter->write_target = omap_obj->sync->write_pending;
-		waiter->notify = fxn;
-		waiter->arg = arg;
-
-		spin_lock(&sync_lock);
-		if (is_waiting(waiter)) {
-			SYNC("waited: %p", waiter);
-			list_add_tail(&waiter->list, &waiters);
-			spin_unlock(&sync_lock);
-			return 0;
-		}
-
-		spin_unlock(&sync_lock);
-
-		kfree(waiter);
-	}
-
-	/* no waiting.. */
-	fxn(arg);
-
-	return 0;
-}
-
-/* -----------------------------------------------------------------------------
* Constructor & Destructor
*/
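The block removed above was omapdrm's hand-rolled, per-buffer read/write bookkeeping: omap_gem_op_start()/omap_gem_op_finish() counted pending and completed hardware accesses, and omap_gem_op_sync() blocked a task until the targets it sampled were reached. Roughly, the pattern those helpers supported looked like the sketch below; my_submit_dma() and my_dma_done() are hypothetical callers, only the omap_gem_op_* helpers come from the removed code.

/* Illustrative only: how a DMA submission path would have used the
 * removed helpers. my_submit_dma()/my_dma_done() are hypothetical. */
static int my_submit_dma(struct drm_gem_object *obj)
{
	int ret;

	/* Record a pending hardware write on the buffer. */
	ret = omap_gem_op_start(obj, OMAP_GEM_WRITE);
	if (ret)
		return ret;

	/* ... kick the DMA engine ... */
	return 0;
}

static void my_dma_done(struct drm_gem_object *obj)
{
	/* Mark the write complete and wake anyone blocked in
	 * omap_gem_op_sync(), e.g. the old CPU_PREP path. */
	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
}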
		drm_prime_gem_destroy(obj, omap_obj->sgt);
	}
-	kfree(omap_obj->sync);
-
	drm_gem_object_release(obj);
	kfree(omap_obj);