@@ -580,61 +580,57 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
-static bool
-drm_dequeue_event(struct drm_file *file_priv,
- size_t total, size_t max, struct drm_pending_event **out)
+ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
{
+ struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
struct drm_pending_event *e;
unsigned long flags;
- bool ret = false;
-
- spin_lock_irqsave(&dev->event_lock, flags);
+ ssize_t ret;
+ int err;
- *out = NULL;
- if (list_empty(&file_priv->event_list))
- goto out;
- e = list_first_entry(&file_priv->event_list,
- struct drm_pending_event, link);
- if (e->event->length + total > max)
- goto out;
+ if ((filp->f_flags & O_NONBLOCK) == 0) {
+ ret = wait_event_interruptible(file_priv->event_wait,
+ !list_empty(&file_priv->event_list));
+ if (ret < 0)
+ return ret;
+ }
- file_priv->event_space += e->event->length;
- list_del(&e->link);
- *out = e;
- ret = true;
+ ret = err = 0;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ do {
+ if (list_empty(&file_priv->event_list)) {
+ if (filp->f_flags & O_NONBLOCK)
+ err = -EAGAIN;
+ break;
+ }
-out:
- spin_unlock_irqrestore(&dev->event_lock, flags);
- return ret;
-}
+ e = list_first_entry(&file_priv->event_list,
+ struct drm_pending_event, link);
+ if (e->event->length + ret > count) {
+ err = -EINVAL;
+ break;
+ }
-ssize_t drm_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_pending_event *e;
- size_t total;
- ssize_t ret;
+ file_priv->event_space += e->event->length;
+ list_del(&e->link);
- ret = wait_event_interruptible(file_priv->event_wait,
- !list_empty(&file_priv->event_list));
- if (ret < 0)
- return ret;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
- total = 0;
- while (drm_dequeue_event(file_priv, total, count, &e)) {
- if (copy_to_user(buffer + total,
- e->event, e->event->length)) {
- total = -EFAULT;
+ if (copy_to_user(buffer + ret, e->event, e->event->length)) {
+ err = -EFAULT;
+ /* re-take the lock so the final unlock below stays balanced */
+ spin_lock_irqsave(&dev->event_lock, flags);
break;
}
- total += e->event->length;
+ ret += e->event->length;
e->destroy(e);
- }
- return total;
+ spin_lock_irqsave(&dev->event_lock, flags);
+ } while (1);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return ret ? ret : err;
}
EXPORT_SYMBOL(drm_read);
Otherwise drmHandleEvent will block if accidentally read too often. In order
to handle the case where the event is read off the queue by another thread
before we dequeue the event, we delay the actual checking for EAGAIN to under
the spinlock and so inline drm_dequeue_event().

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/drm_fops.c | 76 +++++++++++++++++++++-----------------------
 1 files changed, 36 insertions(+), 40 deletions(-)
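
For illustration, a rough userspace sketch (not part of the patch) of how a client
could rely on the new behaviour: open the DRM node with O_NONBLOCK and drain the
event queue until read() reports EAGAIN. The device path, the buffer size and the
local drm_event_hdr mirror of the uapi struct drm_event header are assumptions for
the example, not something the patch defines.

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Mirrors the uapi struct drm_event header: every event starts with these
 * two fields, and 'length' covers the header plus the payload. */
struct drm_event_hdr {
	uint32_t type;
	uint32_t length;
};

static void drain_drm_events(int fd)
{
	char buf[1024];

	for (;;) {
		ssize_t len = read(fd, buf, sizeof(buf));

		if (len == 0)
			return;
		if (len < 0) {
			if (errno == EAGAIN)	/* queue empty: non-blocking read */
				return;
			if (errno == EINTR)
				continue;
			perror("read");
			return;
		}

		/* Walk the events packed back to back in the buffer. */
		for (size_t off = 0;
		     off + sizeof(struct drm_event_hdr) <= (size_t)len; ) {
			struct drm_event_hdr ev;

			memcpy(&ev, buf + off, sizeof(ev));
			if (ev.length < sizeof(ev))
				break;	/* malformed event, stop walking */
			printf("event type %u, length %u\n",
			       (unsigned)ev.type, (unsigned)ev.length);
			off += ev.length;
		}
	}
}

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	drain_drm_events(fd);
	close(fd);
	return 0;
}

Before this change the same read() would have gone to sleep in
wait_event_interruptible() whenever the queue was empty, which is why a caller
such as drmHandleEvent could stall if it read one time too many.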