static struct class *uacce_class;
static dev_t uacce_devt;
-static DEFINE_MUTEX(uacce_mutex);
static DEFINE_XARRAY_ALLOC(uacce_xa);
+/*
+ * If the parent driver or the device disappears, the queue state is invalid and
+ * ops are not usable anymore.
+ */
+static bool uacce_queue_is_valid(struct uacce_queue *q)
+{
+	return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
+}
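+
+/*
+ * Queue state is only stable while holding q->mutex or uacce->mutex:
+ * uacce_remove() moves queues to UACCE_Q_ZOMBIE under both locks. The
+ * start/put helpers below are called with uacce->mutex held.
+ */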
static int uacce_start_queue(struct uacce_queue *q)
{
-	int ret = 0;
-
-	mutex_lock(&uacce_mutex);
-
-	if (q->state != UACCE_Q_INIT) {
-		ret = -EINVAL;
-		goto out_with_lock;
-	}
+	int ret;
+
+	if (q->state != UACCE_Q_INIT)
+		return -EINVAL;
if (q->uacce->ops->start_queue) {
ret = q->uacce->ops->start_queue(q);
if (ret < 0)
- goto out_with_lock;
+ return ret;
}
q->state = UACCE_Q_STARTED;
-
-out_with_lock:
- mutex_unlock(&uacce_mutex);
-
- return ret;
+ return 0;
}
static int uacce_put_queue(struct uacce_queue *q)
{
struct uacce_device *uacce = q->uacce;
- mutex_lock(&uacce_mutex);
-
- if (q->state == UACCE_Q_ZOMBIE)
- goto out;
-
if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
uacce->ops->stop_queue(q);
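+	/*
+	 * After uacce_remove() the queue is UACCE_Q_ZOMBIE and uacce->ops is
+	 * NULL: skip the driver callback instead of dereferencing it.
+	 */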
-	uacce->ops->put_queue(q);
+	if (uacce_queue_is_valid(q) && uacce->ops->put_queue)
+		uacce->ops->put_queue(q);
q->state = UACCE_Q_ZOMBIE;
-out:
- mutex_unlock(&uacce_mutex);
return 0;
}
static long uacce_fops_unl_ioctl(struct file *filep,
				 unsigned int cmd, unsigned long arg)
{
struct uacce_queue *q = filep->private_data;
struct uacce_device *uacce = q->uacce;
+ long ret = -ENXIO;
+
+ /*
+ * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
+ * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
+ * gets called with mmap_lock held, by taking uacce->mutex instead of
+ * q->mutex. Doing this in uacce_fops_mmap() is not possible because
+ * uacce_fops_open() calls iommu_sva_bind_device(), which takes
+ * mmap_lock, while holding uacce->mutex.
+	 * mmap_lock, while holding uacce->mutex.
+	 */
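+	/*
+	 * The resulting lock ordering is uacce->mutex -> mmap_lock -> q->mutex.
+	 * Taking q->mutex here would add q->mutex -> mmap_lock and close a
+	 * cycle with uacce_fops_mmap(); taking uacce->mutex in mmap would add
+	 * mmap_lock -> uacce->mutex and close a cycle with uacce_fops_open().
+	 */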
+ mutex_lock(&uacce->mutex);
+ if (!uacce_queue_is_valid(q))
+ goto out_unlock;
switch (cmd) {
case UACCE_CMD_START_Q:
- return uacce_start_queue(q);
-
+ ret = uacce_start_queue(q);
+ break;
case UACCE_CMD_PUT_Q:
- return uacce_put_queue(q);
-
+ ret = uacce_put_queue(q);
+ break;
default:
- if (!uacce->ops->ioctl)
- return -EINVAL;
-
- return uacce->ops->ioctl(q, cmd, arg);
+ if (uacce->ops->ioctl)
+ ret = uacce->ops->ioctl(q, cmd, arg);
+ else
+ ret = -EINVAL;
}
+out_unlock:
+ mutex_unlock(&uacce->mutex);
+ return ret;
}
#ifdef CONFIG_COMPAT
if (!q)
return -ENOMEM;
+ mutex_lock(&uacce->mutex);
+
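+	/* uacce_remove() clears ->parent: do not open a removed device */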
+ if (!uacce->parent) {
+ ret = -EINVAL;
+ goto out_with_mem;
+ }
+
ret = uacce_bind_queue(uacce, q);
if (ret)
goto out_with_mem;
filep->private_data = q;
uacce->inode = inode;
q->state = UACCE_Q_INIT;
-
- mutex_lock(&uacce->queues_lock);
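+	/* init q->mutex before list_add() publishes the queue to uacce_remove() */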
+ mutex_init(&q->mutex);
list_add(&q->list, &uacce->queues);
- mutex_unlock(&uacce->queues_lock);
+ mutex_unlock(&uacce->mutex);
return 0;
out_with_bond:
	uacce_unbind_queue(q);
out_with_mem:
kfree(q);
+ mutex_unlock(&uacce->mutex);
return ret;
}
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
- mutex_lock(&q->uacce->queues_lock);
- list_del(&q->list);
- mutex_unlock(&q->uacce->queues_lock);
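+	/*
+	 * uacce->mutex serializes release against uacce_remove(), which walks
+	 * uacce->queues and disables each queue under the same lock.
+	 */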
+ mutex_lock(&uacce->mutex);
uacce_put_queue(q);
uacce_unbind_queue(q);
+ list_del(&q->list);
+ mutex_unlock(&uacce->mutex);
kfree(q);
return 0;
vma->vm_private_data = q;
qfr->type = type;
- mutex_lock(&uacce_mutex);
-
- if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
- ret = -EINVAL;
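+	/*
+	 * Use q->mutex rather than uacce->mutex: mmap is called with mmap_lock
+	 * held, and uacce_fops_open() takes mmap_lock (via
+	 * iommu_sva_bind_device()) while holding uacce->mutex.
+	 */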
+ mutex_lock(&q->mutex);
+ if (!uacce_queue_is_valid(q)) {
+ ret = -ENXIO;
goto out_with_lock;
}
}
q->qfrs[type] = qfr;
- mutex_unlock(&uacce_mutex);
+ mutex_unlock(&q->mutex);
return ret;
out_with_lock:
- mutex_unlock(&uacce_mutex);
+ mutex_unlock(&q->mutex);
kfree(qfr);
return ret;
}
static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
struct uacce_queue *q = file->private_data;
struct uacce_device *uacce = q->uacce;
+ __poll_t ret = 0;
+
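+	/* a queue disabled by uacce_remove() no longer reports events */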
+ mutex_lock(&q->mutex);
+ if (!uacce_queue_is_valid(q))
+ goto out_unlock;
poll_wait(file, &q->wait, wait);
+
if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
- return EPOLLIN | EPOLLRDNORM;
+ ret = EPOLLIN | EPOLLRDNORM;
- return 0;
+out_unlock:
+ mutex_unlock(&q->mutex);
+ return ret;
}
static const struct file_operations uacce_fops = {
goto err_with_uacce;
INIT_LIST_HEAD(&uacce->queues);
- mutex_init(&uacce->queues_lock);
+ mutex_init(&uacce->mutex);
device_initialize(&uacce->dev);
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
uacce->dev.class = uacce_class;
if (uacce->inode)
unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
+ /*
+ * uacce_fops_open() may be running concurrently, even after we remove
+ * the cdev. Holding uacce->mutex ensures that open() does not obtain a
+ * removed uacce device.
+ */
+ mutex_lock(&uacce->mutex);
/* ensure no open queue remains */
- mutex_lock(&uacce->queues_lock);
list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+ /*
+ * Taking q->mutex ensures that fops do not use the defunct
+ * uacce->ops after the queue is disabled.
+ */
+ mutex_lock(&q->mutex);
uacce_put_queue(q);
+ mutex_unlock(&q->mutex);
uacce_unbind_queue(q);
}
- mutex_unlock(&uacce->queues_lock);
/* disable sva now since no opened queues */
uacce_disable_sva(uacce);
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
xa_erase(&uacce_xa, uacce->dev_id);
+ /*
+ * uacce exists as long as there are open fds, but ops will be freed
+ * now. Ensure that bugs cause NULL deref rather than use-after-free.
+ */
+ uacce->ops = NULL;
+ uacce->parent = NULL;
+ mutex_unlock(&uacce->mutex);
put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);