@@ -18,6 +18,7 @@ msm-y := \
mdp4/mdp4_irq.o \
mdp4/mdp4_kms.o \
mdp4/mdp4_plane.o \
+ msm_atomic.o \
msm_drv.o \
msm_fb.o \
msm_gem.o \
@@ -51,7 +51,6 @@ struct mdp4_crtc {
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
- struct msm_fence_cb pageflip_cb;
#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
@@ -120,12 +119,16 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void crtc_flush(struct drm_crtc *crtc)
+void mdp4_crtc_flush(struct drm_crtc *crtc)
{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
uint32_t i, flush = 0;
+ if (priv->pending_crtcs & (1 << crtc->id))
+ return;
+
for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
struct drm_plane *plane = mdp4_crtc->planes[i];
if (plane) {
@@ -148,23 +151,6 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
}
-static void pageflip_cb(struct msm_fence_cb *cb)
-{
- struct mdp4_crtc *mdp4_crtc =
- container_of(cb, struct mdp4_crtc, pageflip_cb);
- struct drm_crtc *crtc = &mdp4_crtc->base;
- struct drm_framebuffer *fb = crtc->fb;
-
- if (!fb)
- return;
-
- mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- crtc_flush(crtc);
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
-}
-
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
struct mdp4_crtc *mdp4_crtc =
@@ -374,7 +360,7 @@ static void mdp4_crtc_prepare(struct drm_crtc *crtc)
static void mdp4_crtc_commit(struct drm_crtc *crtc)
{
mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- crtc_flush(crtc);
+ mdp4_crtc_flush(crtc);
/* drop the ref to mdp clk's that we got in prepare: */
mdp4_disable(get_kms(crtc));
}
@@ -405,23 +391,27 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_gem_object *obj;
unsigned long flags;
+ spin_lock_irqsave(&dev->event_lock, flags);
if (mdp4_crtc->event) {
+ spin_unlock_irqrestore(&dev->event_lock, flags);
dev_err(dev->dev, "already pending flip!\n");
return -EBUSY;
}
- obj = msm_framebuffer_bo(new_fb, 0);
-
- spin_lock_irqsave(&dev->event_lock, flags);
mdp4_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
update_fb(crtc, true, new_fb);
- return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
+ mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
+ mdp4_crtc_flush(crtc);
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
+
+ return 0;
}
static int mdp4_crtc_set_property(struct drm_crtc *crtc, void *state,
@@ -598,8 +588,8 @@ static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
{
struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
struct drm_crtc *crtc = &mdp4_crtc->base;
- DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
- crtc_flush(crtc);
+ DRM_ERROR("%s: error: %08x\n", mdp4_crtc->name, irqstatus);
+ mdp4_crtc_flush(crtc);
}
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
@@ -679,7 +669,7 @@ static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
mdp4_crtc->planes[pipe_id] = plane;
blend_setup(crtc);
if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
- crtc_flush(crtc);
+ mdp4_crtc_flush(crtc);
}
void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
@@ -737,8 +727,6 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
"unref cursor", unref_cursor_worker);
- INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
-
drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
@@ -128,6 +128,11 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
return mdp4_dtv_round_pixclk(encoder, rate);
}
+/* msm_kms_funcs::flush vfunc — thin adapter onto mdp4_crtc_flush() */
+static void mdp4_flush(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	mdp4_crtc_flush(crtc);
+}
+
static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
@@ -154,6 +159,7 @@ static const struct msm_kms_funcs kms_funcs = {
.disable_vblank = mdp4_disable_vblank,
.get_format = mdp4_get_format,
.round_pixclk = mdp4_round_pixclk,
+ .flush = mdp4_flush,
.preclose = mdp4_preclose,
.destroy = mdp4_destroy,
};
@@ -206,6 +206,7 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane);
+void mdp4_crtc_flush(struct drm_crtc *crtc);
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
new file mode 100644
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+
+/* Tracks one deferred (nonblocking) atomic commit: the state to apply,
+ * the fence value all pending scanout bo's must reach before it may be
+ * applied, and the callback that performs the commit when they do.
+ */
+struct msm_async_commit {
+	struct drm_atomic_helper_state *state;  /* ref held until commit runs */
+	uint32_t fence;                         /* max fence over all pending fb's */
+	struct msm_fence_cb fence_cb;           /* runs fence_cb() -> commit_sync() */
+};
+
+static void fence_cb(struct msm_fence_cb *cb);
+static int commit_sync(struct drm_device *dev, void *state);
+
+/* Allocate an async-commit tracker for @state, taking a reference on the
+ * state so it stays alive until the deferred commit runs (dropped in
+ * free_commit()).  Returns NULL on allocation failure.
+ */
+static struct msm_async_commit *new_commit(struct drm_atomic_helper_state *state)
+{
+	struct msm_async_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+	if (!c)
+		return NULL;
+
+	drm_atomic_helper_state_reference(state);
+	c->state = state;
+	INIT_FENCE_CB(&c->fence_cb, fence_cb);
+
+	return c;
+}
+/* Drop the state reference taken in new_commit() and free the tracker. */
+static void free_commit(struct msm_async_commit *c)
+{
+	drm_atomic_helper_state_unreference(c->state);
+	kfree(c);
+}
+
+/* Fence callback (queued on priv->wq by msm_queue_fence_cb()): the fb's
+ * this commit was waiting on are now ready, so apply the deferred state.
+ * NOTE(review): commit_sync()'s return value is dropped here — there is
+ * no caller left to report an async commit failure to.
+ */
+static void fence_cb(struct msm_fence_cb *cb)
+{
+	struct msm_async_commit *c =
+			container_of(cb, struct msm_async_commit, fence_cb);
+	commit_sync(c->state->dev, c->state);
+	free_commit(c);
+}
+
+/* Fold @fb's scanout bo fence into the commit's wait-fence.  MSM_PREP_READ
+ * because scanout *reads* the buffer, so we must wait for pending writes
+ * (GPU rendering) to it — see msm_gem_fence().  @crtc is currently unused.
+ */
+static void add_fb(struct msm_async_commit *c, struct drm_crtc *crtc,
+		struct drm_framebuffer *fb)
+{
+	struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
+	c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+}
+
+/* Block until @fb's scanout bo is ready (blocking-commit path).
+ * NOTE(review): not implemented yet — currently returns success without
+ * waiting, so a blocking commit may scan out a buffer that is still
+ * being rendered.  Needs a fence wait analogous to the nonblocking path.
+ */
+static int wait_fb(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+	// XXX TODO wait..
+	return 0;
+}
+
+#define pending_fb(state) ((state) && (state)->fb && (state)->new_fb)
+
+/* Apply an atomic state to the hardware now.
+ *
+ * While drm_atomic_helper_commit() runs, the affected crtc's are marked
+ * in priv->pending_crtcs; mdp4_crtc_flush() early-returns for a marked
+ * crtc, so intermediate per-plane updates are not flushed piecemeal.
+ * Once the helper commit succeeds, each touched crtc is flushed exactly
+ * once via kms->funcs->flush().
+ *
+ * Called directly for blocking commits, or from fence_cb() (on the
+ * driver workqueue) for deferred nonblocking commits.
+ */
+static int commit_sync(struct drm_device *dev, void *state)
+{
+	struct drm_atomic_helper_state *a = state;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	int ncrtcs = dev->mode_config.num_crtc;
+	uint32_t pending_crtcs = 0;
+	int i, ret;
+
+	/* bitmask of crtc's touched by this state: */
+	for (i = 0; i < ncrtcs; i++)
+		if (a->crtcs[i])
+			pending_crtcs |= (1 << a->crtcs[i]->id);
+
+	/* overlapping commits on the same crtc are not expected: */
+	mutex_lock(&dev->struct_mutex);
+	WARN_ON(priv->pending_crtcs & pending_crtcs);
+	priv->pending_crtcs |= pending_crtcs;
+	mutex_unlock(&dev->struct_mutex);
+
+	ret = drm_atomic_helper_commit(dev, state);
+
+	mutex_lock(&dev->struct_mutex);
+	priv->pending_crtcs &= ~pending_crtcs;
+	mutex_unlock(&dev->struct_mutex);
+
+	if (ret)
+		return ret;
+
+	/* commit done — now flush each crtc once: */
+	for (i = 0; i < ncrtcs; i++)
+		if (a->crtcs[i])
+			kms->funcs->flush(kms, a->crtcs[i]);
+
+	return 0;
+}
+
+/* Driver atomic_commit entry point.
+ *
+ * Nonblocking: collect the max fence across all pending scanout fb's and
+ * defer the actual commit to fence_cb() via msm_queue_fence_cb().
+ * Blocking: wait for the fb's up front, then commit synchronously.
+ *
+ * Fix: on msm_queue_fence_cb() failure the async-commit tracker (and the
+ * state reference it holds) was leaked; free it before returning.
+ */
+int msm_atomic_commit(struct drm_device *dev, void *state)
+{
+	struct drm_atomic_helper_state *a = state;
+	int nplanes = dev->mode_config.num_plane;
+	int ncrtcs = dev->mode_config.num_crtc;
+	int i;
+
+	if (a->flags & DRM_MODE_ATOMIC_NONBLOCK) {
+		/* non-block mode: defer commit until fb's are ready */
+		struct msm_async_commit *c = new_commit(state);
+		int ret;
+
+		if (!c)
+			return -ENOMEM;
+
+		for (i = 0; i < nplanes; i++)
+			if (pending_fb(a->pstates[i]))
+				add_fb(c, a->pstates[i]->crtc, a->pstates[i]->fb);
+
+		for (i = 0; i < ncrtcs; i++)
+			if (pending_fb(a->cstates[i]))
+				add_fb(c, a->crtcs[i], a->cstates[i]->fb);
+
+		ret = msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+		if (ret)
+			free_commit(c);   /* don't leak tracker + state ref */
+
+		return ret;
+	} else {
+		/* blocking mode: wait until fb's are ready */
+		int ret = 0;
+
+		for (i = 0; i < nplanes && !ret; i++)
+			if (pending_fb(a->pstates[i]))
+				ret = wait_fb(a->pstates[i]->crtc, a->pstates[i]->fb);
+
+		for (i = 0; i < ncrtcs && !ret; i++)
+			if (pending_fb(a->cstates[i]))
+				ret = wait_fb(a->crtcs[i], a->cstates[i]->fb);
+
+		if (ret)
+			return ret;
+	}
+
+	return commit_sync(dev, state);
+}
@@ -537,6 +537,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
return ret;
}
+/* Arrange for @cb to run (on priv->wq) once @fence has completed.  If
+ * the fence has already passed, the callback is queued immediately.
+ * Returns -EINVAL if @cb is already pending on a list.
+ */
+int msm_queue_fence_cb(struct drm_device *dev,
+		struct msm_fence_cb *cb, uint32_t fence)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	if (!list_empty(&cb->work.entry)) {
+		/* cb already queued/pending — caller error */
+		ret = -EINVAL;
+	} else if (fence > priv->completed_fence) {
+		/* not yet signalled: park on fence_cbs until msm_update_fence() */
+		cb->fence = fence;
+		list_add_tail(&cb->work.entry, &priv->fence_cbs);
+	} else {
+		/* already signalled: dispatch immediately */
+		queue_work(priv->wq, &cb->work);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
@@ -734,7 +754,7 @@ static struct drm_driver msm_driver = {
.atomic_begin = drm_atomic_helper_begin,
.atomic_set_event = drm_atomic_helper_set_event,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
+ .atomic_commit = msm_atomic_commit,
.atomic_end = drm_atomic_helper_end,
.atomic_helpers = &drm_atomic_helper_funcs,
#ifdef CONFIG_DEBUG_FS
@@ -95,6 +95,9 @@ struct msm_drm_private {
unsigned int num_connectors;
struct drm_connector *connectors[8];
+
+ /* crtc's pending atomic update: */
+ uint32_t pending_crtcs;
};
struct msm_format {
@@ -134,6 +137,7 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
+ void (*flush)(struct msm_kms *kms, struct drm_crtc *crtc);
/* cleanup: */
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
void (*destroy)(struct msm_kms *kms);
@@ -151,11 +155,15 @@ int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
struct timespec *timeout);
+int msm_queue_fence_cb(struct drm_device *dev,
+ struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
+int msm_atomic_commit(struct drm_device *dev, void *state);
+
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
@@ -400,23 +400,11 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb)
{
- struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret = 0;
+ uint32_t fence = msm_gem_fence(msm_obj,
+ MSM_PREP_READ | MSM_PREP_WRITE);
- mutex_lock(&dev->struct_mutex);
- if (!list_empty(&cb->work.entry)) {
- ret = -EINVAL;
- } else if (is_active(msm_obj)) {
- cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
- list_add_tail(&cb->work.entry, &priv->fence_cbs);
- } else {
- queue_work(priv->wq, &cb->work);
- }
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
+ return msm_queue_fence_cb(obj->dev, cb, fence);
}
void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -455,12 +443,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
int ret = 0;
if (is_active(msm_obj)) {
- uint32_t fence = 0;
+ uint32_t fence = msm_gem_fence(msm_obj, op);
- if (op & MSM_PREP_READ)
- fence = msm_obj->write_fence;
- if (op & MSM_PREP_WRITE)
- fence = max(fence, msm_obj->read_fence);
if (op & MSM_PREP_NOSYNC)
timeout = NULL;
@@ -65,6 +65,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
return msm_obj->gpu != NULL;
}
+/* Return the fence that must complete before the bo can be accessed for
+ * @op: a reader must wait for the last write (write_fence); a writer
+ * must additionally wait for the last read (read_fence).  Returns 0 if
+ * @op has neither MSM_PREP_READ nor MSM_PREP_WRITE set.
+ */
+static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
+		uint32_t op)
+{
+	uint32_t fence = 0;
+
+	if (op & MSM_PREP_READ)
+		fence = msm_obj->write_fence;
+	if (op & MSM_PREP_WRITE)
+		fence = max(fence, msm_obj->read_fence);
+
+	return fence;
+}
+
#define MAX_CMDS 4
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,