@@ -347,6 +347,8 @@ void drm_vblank_cleanup(struct drm_device *dev)
drm_core_check_feature(dev, DRIVER_MODESET));
del_timer_sync(&vblank->disable_timer);
+
+ flush_work(&vblank->unprepare.work);
}
kfree(dev->vblank);
@@ -355,6 +357,20 @@ void drm_vblank_cleanup(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_vblank_cleanup);
+static void drm_vblank_unprepare_work_fn(struct work_struct *work)
+{
+ struct drm_vblank_crtc *vblank;
+ struct drm_device *dev;
+
+ vblank = container_of(work, typeof(*vblank), unprepare.work);
+ dev = vblank->dev;
+
+ /* atomic_add_unless() never drops the counter below zero, even if
+ * the work is re-queued after the counter has already been drained.
+ */
+ while (atomic_add_unless(&vblank->unprepare.counter, -1, 0)) {
+ if (dev->driver->unprepare_vblank)
+ dev->driver->unprepare_vblank(dev, vblank->pipe);
+ }
+}
+
/**
* drm_vblank_init - initialize vblank support
* @dev: DRM device
@@ -388,6 +404,8 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
setup_timer(&vblank->disable_timer, vblank_disable_fn,
(unsigned long)vblank);
seqlock_init(&vblank->seqlock);
+ INIT_WORK(&vblank->unprepare.work,
+ drm_vblank_unprepare_work_fn);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -1170,6 +1188,9 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
if (WARN_ON(pipe >= dev->num_crtcs))
return -EINVAL;
+ if (dev->driver->prepare_vblank)
+ dev->driver->prepare_vblank(dev, pipe);
+
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &vblank->refcount) == 1) {
@@ -1182,6 +1203,9 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ if (ret != 0 && dev->driver->unprepare_vblank)
+ dev->driver->unprepare_vblank(dev, pipe);
+
return ret;
}
EXPORT_SYMBOL(drm_vblank_get);
@@ -1234,6 +1258,9 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
}
+
+ if (dev->driver->unprepare_vblank) {
+ atomic_inc(&vblank->unprepare.counter);
+ schedule_work(&vblank->unprepare.work);
+ }
}
EXPORT_SYMBOL(drm_vblank_put);
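
For context, here is a minimal caller-side sketch (illustrative only, not part of the diff) of how the two hooks now bracket an ordinary get/put pair: prepare_vblank runs synchronously inside drm_vblank_get(), before dev->vbl_lock is taken, while unprepare_vblank is deferred to the worker scheduled above, so drm_vblank_put() itself never has to sleep. The example_ name is made up for this sketch.

/* Illustrative caller, assuming process context since ->prepare_vblank() may sleep. */
static u32 example_sample_vblank(struct drm_device *dev, unsigned int pipe)
{
	u32 count = 0;

	/* Synchronously invokes ->prepare_vblank() before taking dev->vbl_lock. */
	if (drm_vblank_get(dev, pipe) == 0) {
		count = drm_vblank_count(dev, pipe);
		/* Defers ->unprepare_vblank() to the unprepare worker. */
		drm_vblank_put(dev, pipe);
	}

	return count;
}
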
@@ -1668,7 +1695,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
}
spin_unlock_irqrestore(&dev->event_lock, flags);
-
return 0;
err_unlock:
@@ -443,6 +443,30 @@ struct drm_driver {
u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
/**
+ * prepare_vblank - Optional vblank prepare hook.
+ * @dev: DRM device
+ * @pipe: index of the CRTC whose vblank is being prepared
+ *
+ * Drivers that need to take a mutex or run any other sleeping code when
+ * enabling vblank interrupts should implement this hook. It is called
+ * from drm_vblank_get() before any spinlock is taken, so it may sleep.
+ */
+ void (*prepare_vblank) (struct drm_device *dev, unsigned int pipe);
+
+ /**
+ * unprepare_vblank - Optional vblank unprepare hook.
+ * @dev: DRM device
+ * @pipe: index of the CRTC whose vblank is being unprepared
+ *
+ * Drivers that need to take a mutex or run any other sleeping code when
+ * disabling vblank interrupts should implement this hook. It is called
+ * from a worker after drm_vblank_put() has released its spinlocks, so
+ * it may sleep.
+ */
+ void (*unprepare_vblank) (struct drm_device *dev, unsigned int pipe);
+
+ /**
* enable_vblank - enable vblank interrupt events
* @dev: DRM device
* @pipe: which irq to enable
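
On the driver side, a minimal sketch of how these hooks might be wired up, assuming a hypothetical driver whose vblank registers are only reachable while a runtime-PM reference is held (the foo_* names and struct foo_device are made up for illustration, not taken from any existing driver):

#include <linux/pm_runtime.h>

struct foo_device {
	struct device *dev;	/* backing platform/PCI device */
};

static void foo_prepare_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct foo_device *foo = dev->dev_private;

	/* May sleep: runs before drm_vblank_get() takes dev->vbl_lock. */
	pm_runtime_get_sync(foo->dev);
}

static void foo_unprepare_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct foo_device *foo = dev->dev_private;

	/* May sleep: runs from the unprepare worker, one call per drm_vblank_put(). */
	pm_runtime_put(foo->dev);
}

static struct drm_driver foo_drm_driver = {
	/* ... other callbacks ... */
	.prepare_vblank   = foo_prepare_vblank,
	.unprepare_vblank = foo_unprepare_vblank,
};

Because the unprepare call is made from the worker's process context, the pm_runtime_put() above never runs under dev->vbl_lock or dev->event_lock.
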
@@ -716,6 +740,11 @@ struct drm_pending_vblank_event {
struct drm_event_vblank event;
};
+struct drm_vblank_unprepare {
+ struct work_struct work; /* worker that calls unprepare_vblank */
+ atomic_t counter; /* number of pending unprepare_vblank calls */
+};
+
struct drm_vblank_crtc {
struct drm_device *dev; /* pointer to the drm_device */
wait_queue_head_t queue; /**< VBLANK wait queue */
@@ -736,6 +765,7 @@ struct drm_vblank_crtc {
int linedur_ns; /* line duration in ns */
bool enabled; /* so we don't call enable more than
once per disable */
+ struct drm_vblank_unprepare unprepare; /* Unprepare work helper */
};
/**