@@ -9,43 +9,38 @@
static struct class *uacce_class;
static dev_t uacce_devt;
-static DEFINE_MUTEX(uacce_mutex);
static DEFINE_XARRAY_ALLOC(uacce_xa);
-static int uacce_start_queue(struct uacce_queue *q)
+/*
+ * If the parent driver or the device disappears, the queue state is invalid and
+ * ops are not usable anymore.
+ */
+static bool uacce_queue_is_valid(struct uacce_queue *q)
{
- int ret = 0;
+ return q->state == UACCE_Q_INIT || q->state == UACCE_Q_STARTED;
+}
- mutex_lock(&uacce_mutex);
+static int uacce_start_queue(struct uacce_queue *q)
+{
+ int ret;
- if (q->state != UACCE_Q_INIT) {
- ret = -EINVAL;
- goto out_with_lock;
- }
+ if (q->state != UACCE_Q_INIT)
+ return -EINVAL;
if (q->uacce->ops->start_queue) {
ret = q->uacce->ops->start_queue(q);
if (ret < 0)
- goto out_with_lock;
+ return ret;
}
q->state = UACCE_Q_STARTED;
-
-out_with_lock:
- mutex_unlock(&uacce_mutex);
-
- return ret;
+ return 0;
}
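+/*
+ * Safe to call even after uacce_remove() has disabled the queue and cleared
+ * uacce->ops: the queue is then UACCE_Q_ZOMBIE, so the state checks below
+ * short-circuit before the defunct ops pointer is dereferenced.
+ */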
static int uacce_put_queue(struct uacce_queue *q)
{
struct uacce_device *uacce = q->uacce;
- mutex_lock(&uacce_mutex);
-
- if (q->state == UACCE_Q_ZOMBIE)
- goto out;
-
if ((q->state == UACCE_Q_STARTED) && uacce->ops->stop_queue)
uacce->ops->stop_queue(q);
@@ -54,8 +49,6 @@ static int uacce_put_queue(struct uacce_queue *q)
uacce->ops->put_queue(q);
q->state = UACCE_Q_ZOMBIE;
-out:
- mutex_unlock(&uacce_mutex);
return 0;
}
@@ -65,20 +58,36 @@ static long uacce_fops_unl_ioctl(struct file *filep,
{
struct uacce_queue *q = filep->private_data;
struct uacce_device *uacce = q->uacce;
+ long ret = -ENXIO;
+
+ /*
+ * uacce->ops->ioctl() may take the mmap_lock when copying arg to/from
+ * user. Avoid a circular lock dependency with uacce_fops_mmap(), which
+ * gets called with mmap_lock held, by taking uacce->mutex instead of
+ * q->mutex. Doing this in uacce_fops_mmap() is not possible because
+ * uacce_fops_open() calls iommu_sva_bind_device(), which takes
+ * mmap_lock, while holding uacce->mutex.
+ */
+ mutex_lock(&uacce->mutex);
+ if (!uacce_queue_is_valid(q))
+ goto out_unlock;
switch (cmd) {
case UACCE_CMD_START_Q:
- return uacce_start_queue(q);
-
+ ret = uacce_start_queue(q);
+ break;
case UACCE_CMD_PUT_Q:
- return uacce_put_queue(q);
-
+ ret = uacce_put_queue(q);
+ break;
default:
- if (!uacce->ops->ioctl)
- return -EINVAL;
-
- return uacce->ops->ioctl(q, cmd, arg);
+ if (uacce->ops->ioctl)
+ ret = uacce->ops->ioctl(q, cmd, arg);
+ else
+ ret = -EINVAL;
}
+out_unlock:
+ mutex_unlock(&uacce->mutex);
+ return ret;
}
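+/*
+ * Resulting lock ordering, outermost first (a rough summary of the above):
+ *
+ *   uacce->mutex -> mmap_lock -> q->mutex
+ *
+ * open() and ioctl() may acquire mmap_lock while holding uacce->mutex;
+ * mmap() is entered with mmap_lock already held and only takes q->mutex.
+ */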
#ifdef CONFIG_COMPAT
@@ -136,6 +145,13 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
if (!q)
return -ENOMEM;
+ mutex_lock(&uacce->mutex);
+
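+ /* uacce_remove() clears uacce->parent, so a removed device cannot be opened */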
+ if (!uacce->parent) {
+ ret = -EINVAL;
+ goto out_with_mem;
+ }
+
ret = uacce_bind_queue(uacce, q);
if (ret)
goto out_with_mem;
@@ -152,10 +168,9 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
filep->private_data = q;
uacce->inode = inode;
q->state = UACCE_Q_INIT;
-
- mutex_lock(&uacce->queues_lock);
+ mutex_init(&q->mutex);
list_add(&q->list, &uacce->queues);
- mutex_unlock(&uacce->queues_lock);
+ mutex_unlock(&uacce->mutex);
return 0;
@@ -163,18 +178,20 @@ static int uacce_fops_open(struct inode *inode, struct file *filep)
uacce_unbind_queue(q);
out_with_mem:
kfree(q);
+ mutex_unlock(&uacce->mutex);
return ret;
}
static int uacce_fops_release(struct inode *inode, struct file *filep)
{
struct uacce_queue *q = filep->private_data;
+ struct uacce_device *uacce = q->uacce;
- mutex_lock(&q->uacce->queues_lock);
- list_del(&q->list);
- mutex_unlock(&q->uacce->queues_lock);
+ mutex_lock(&uacce->mutex);
uacce_put_queue(q);
uacce_unbind_queue(q);
+ list_del(&q->list);
+ mutex_unlock(&uacce->mutex);
kfree(q);
return 0;
@@ -217,10 +234,9 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
vma->vm_private_data = q;
qfr->type = type;
- mutex_lock(&uacce_mutex);
-
- if (q->state != UACCE_Q_INIT && q->state != UACCE_Q_STARTED) {
- ret = -EINVAL;
+ mutex_lock(&q->mutex);
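+ /* the parent device or driver may be gone already, hence ENXIO not EINVAL */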
+ if (!uacce_queue_is_valid(q)) {
+ ret = -ENXIO;
goto out_with_lock;
}
@@ -248,12 +264,12 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
}
q->qfrs[type] = qfr;
- mutex_unlock(&uacce_mutex);
+ mutex_unlock(&q->mutex);
return ret;
out_with_lock:
- mutex_unlock(&uacce_mutex);
+ mutex_unlock(&q->mutex);
kfree(qfr);
return ret;
}
@@ -262,12 +278,20 @@ static __poll_t uacce_fops_poll(struct file *file, poll_table *wait)
{
struct uacce_queue *q = file->private_data;
struct uacce_device *uacce = q->uacce;
+ __poll_t ret = 0;
+
+ mutex_lock(&q->mutex);
+ if (!uacce_queue_is_valid(q))
+ goto out_unlock;
poll_wait(file, &q->wait, wait);
+
if (uacce->ops->is_q_updated && uacce->ops->is_q_updated(q))
- return EPOLLIN | EPOLLRDNORM;
+ ret = EPOLLIN | EPOLLRDNORM;
- return 0;
+out_unlock:
+ mutex_unlock(&q->mutex);
+ return ret;
}
static const struct file_operations uacce_fops = {
@@ -450,7 +474,7 @@ struct uacce_device *uacce_alloc(struct device *parent,
goto err_with_uacce;
INIT_LIST_HEAD(&uacce->queues);
- mutex_init(&uacce->queues_lock);
+ mutex_init(&uacce->mutex);
device_initialize(&uacce->dev);
uacce->dev.devt = MKDEV(MAJOR(uacce_devt), uacce->dev_id);
uacce->dev.class = uacce_class;
@@ -507,13 +531,23 @@ void uacce_remove(struct uacce_device *uacce)
if (uacce->inode)
unmap_mapping_range(uacce->inode->i_mapping, 0, 0, 1);
+ /*
+ * uacce_fops_open() may be running concurrently, even after we remove
+ * the cdev. Holding uacce->mutex ensures that open() does not obtain a
+ * removed uacce device.
+ */
+ mutex_lock(&uacce->mutex);
/* ensure no open queue remains */
- mutex_lock(&uacce->queues_lock);
list_for_each_entry_safe(q, next_q, &uacce->queues, list) {
+ /*
+ * Taking q->mutex ensures that fops do not use the defunct
+ * uacce->ops after the queue is disabled.
+ */
+ mutex_lock(&q->mutex);
uacce_put_queue(q);
+ mutex_unlock(&q->mutex);
uacce_unbind_queue(q);
}
- mutex_unlock(&uacce->queues_lock);
/* disable sva now since no open queues remain */
uacce_disable_sva(uacce);
@@ -521,6 +555,13 @@ void uacce_remove(struct uacce_device *uacce)
if (uacce->cdev)
cdev_device_del(uacce->cdev, &uacce->dev);
xa_erase(&uacce_xa, uacce->dev_id);
+ /*
+ * uacce exists as long as there are open fds, but ops will be freed
+ * now. Ensure that bugs cause NULL deref rather than use-after-free.
+ */
+ uacce->ops = NULL;
+ uacce->parent = NULL;
+ mutex_unlock(&uacce->mutex);
put_device(&uacce->dev);
}
EXPORT_SYMBOL_GPL(uacce_remove);
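+/*
+ * Sketch of a caller, for illustration only (my_dev, my_remove and the uacce
+ * member are hypothetical, not part of this API):
+ *
+ *	static int my_remove(struct platform_device *pdev)
+ *	{
+ *		struct my_dev *mdev = platform_get_drvdata(pdev);
+ *
+ *		uacce_remove(mdev->uacce);
+ *		mdev->uacce = NULL;
+ *		return 0;
+ *	}
+ *
+ * Open file descriptors may outlive this call: their queues are left in
+ * UACCE_Q_ZOMBIE state, and further ioctl or mmap calls fail with -ENXIO.
+ */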
@@ -70,6 +70,7 @@ enum uacce_q_state {
* @wait: wait queue head
* @list: entry in the uacce queues list
* @qfrs: pointer of qfr regions
+ * @mutex: protects queue state; nests inside uacce->mutex and mmap_lock
* @state: queue state machine
* @pasid: pasid associated to the mm
* @handle: iommu_sva handle returned by iommu_sva_bind_device()
@@ -80,6 +81,7 @@ struct uacce_queue {
wait_queue_head_t wait;
struct list_head list;
struct uacce_qfile_region *qfrs[UACCE_MAX_REGION];
+ struct mutex mutex;
enum uacce_q_state state;
u32 pasid;
struct iommu_sva *handle;
@@ -97,9 +99,9 @@ struct uacce_queue {
* @dev_id: id of the uacce device
* @cdev: cdev of the uacce
* @dev: dev of the uacce
+ * @mutex: protects the queues list and serializes fops against uacce_remove()
* @priv: private pointer of the uacce
* @queues: list of queues
- * @queues_lock: lock for queues list
* @inode: core vfs
*/
struct uacce_device {
@@ -113,9 +115,9 @@ struct uacce_device {
u32 dev_id;
struct cdev *cdev;
struct device dev;
+ struct mutex mutex;
void *priv;
struct list_head queues;
- struct mutex queues_lock;
struct inode *inode;
};