
[RFCv1,2/2] drm/msm: basic KMS driver for snapdragon

Message ID 1373054027-12948-3-git-send-email-robdclark@gmail.com (mailing list archive)
State Superseded

Commit Message

Rob Clark July 5, 2013, 7:53 p.m. UTC
The snapdragon chips have multiple different display controllers,
depending on which chip variant/version.  (As far as I can tell, current
devices have either MDP3 or MDP4, and upcoming devices have MDSS.)  And
then external to the display controller are HDMI, DSI, etc. blocks which
may be shared across devices which have different display controller
blocks.

To more easily add support for different display controller blocks, the
display controller specific bits are split out into a "kms" object,
which provides the kms plane/crtc/encoder objects.
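
For reference, the "kms" interface ends up looking roughly like the
following (a sketch reconstructed from the calls made in msm_drv.c
below; the real definition lives in msm_drv.h, which is not quoted in
full here):

  struct msm_kms_funcs {
          /* hw initialization: */
          int (*hw_init)(struct msm_kms *kms);
          /* irq handling: */
          void (*irq_preinstall)(struct msm_kms *kms);
          int (*irq_postinstall)(struct msm_kms *kms);
          irqreturn_t (*irq)(struct msm_kms *kms);
          void (*irq_uninstall)(struct msm_kms *kms);
          int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
          void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
          /* file, power and teardown: */
          void (*preclose)(struct msm_kms *kms, struct drm_file *file);
          int (*pm_suspend)(struct msm_kms *kms);
          int (*pm_resume)(struct msm_kms *kms);
          void (*destroy)(struct msm_kms *kms);
  };

  struct msm_kms {
          const struct msm_kms_funcs *funcs;
  };

Each display controller backend (mdp4_kms_init() in this patch, with
other display controller generations to follow) fills in one of these
and hands it back to the core driver.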

The external HDMI, DSI, etc. blocks are part encoder, and part connector
currently.  But I think I will pull in the drm_bridge patches from
chromeos tree, and split them into a bridge+connector, with the
registers that need to be set in modeset handled by the bridge.  This
would remove the 'msm_connector' base class.  But some things need to be
double checked to make sure I could get the correct ON/OFF sequencing..

Signed-off-by: Rob Clark <robdclark@gmail.com>
---
 drivers/gpu/drm/Kconfig                     |   2 +
 drivers/gpu/drm/Makefile                    |   1 +
 drivers/gpu/drm/msm/Kconfig                 |  34 ++
 drivers/gpu/drm/msm/Makefile                |  23 ++
 drivers/gpu/drm/msm/NOTES                   |  43 +++
 drivers/gpu/drm/msm/hdmi/hdmi_connector.c   | 528 ++++++++++++++++++++++++++++
 drivers/gpu/drm/msm/hdmi/hdmi_connector.h   |  95 +++++
 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c         | 264 ++++++++++++++
 drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c    | 140 ++++++++
 drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c    | 215 +++++++++++
 drivers/gpu/drm/msm/mdp4/mdp4_crtc.c        | 440 +++++++++++++++++++++++
 drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c | 306 ++++++++++++++++
 drivers/gpu/drm/msm/mdp4/mdp4_irq.c         | 194 ++++++++++
 drivers/gpu/drm/msm/mdp4/mdp4_kms.c         | 359 +++++++++++++++++++
 drivers/gpu/drm/msm/mdp4/mdp4_kms.h         | 161 +++++++++
 drivers/gpu/drm/msm/mdp4/mdp4_plane.c       | 241 +++++++++++++
 drivers/gpu/drm/msm/msm_connector.c         |  34 ++
 drivers/gpu/drm/msm/msm_connector.h         |  68 ++++
 drivers/gpu/drm/msm/msm_drv.c               | 491 ++++++++++++++++++++++++++
 drivers/gpu/drm/msm/msm_drv.h               | 161 +++++++++
 drivers/gpu/drm/msm/msm_fb.c                | 216 ++++++++++++
 drivers/gpu/drm/msm/msm_fbdev.c             | 255 ++++++++++++++
 drivers/gpu/drm/msm/msm_gem.c               | 441 +++++++++++++++++++++++
 23 files changed, 4712 insertions(+)
 create mode 100644 drivers/gpu/drm/msm/Kconfig
 create mode 100644 drivers/gpu/drm/msm/Makefile
 create mode 100644 drivers/gpu/drm/msm/NOTES
 create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_connector.c
 create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_connector.h
 create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
 create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
 create mode 100644 drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
 create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
 create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
 create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_irq.c
 create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_kms.c
 create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_kms.h
 create mode 100644 drivers/gpu/drm/msm/mdp4/mdp4_plane.c
 create mode 100644 drivers/gpu/drm/msm/msm_connector.c
 create mode 100644 drivers/gpu/drm/msm/msm_connector.h
 create mode 100644 drivers/gpu/drm/msm/msm_drv.c
 create mode 100644 drivers/gpu/drm/msm/msm_drv.h
 create mode 100644 drivers/gpu/drm/msm/msm_fb.c
 create mode 100644 drivers/gpu/drm/msm/msm_fbdev.c
 create mode 100644 drivers/gpu/drm/msm/msm_gem.c

Comments

Jordan Crouse July 8, 2013, 11:17 p.m. UTC | #1
On 07/05/2013 01:53 PM, Rob Clark wrote:
> The snapdragon chips have multiple different display controllers,
> depending on which chip variant/version.  (As far as I can tell, current
> devices have either MDP3 or MDP4, and upcoming devices have MDSS.)  And
> then external to the display controller are HDMI, DSI, etc. blocks which
> may be shared across devices which have different display controller
> blocks.
>
> To more easily add support for different display controller blocks, the
> display controller specific bits are split out into a "kms" object,
> which provides the kms plane/crtc/encoder objects.
>
> The external HDMI, DSI, etc. blocks are part encoder, and part connector
> currently.  But I think I will pull in the drm_bridge patches from
> chromeos tree, and split them into a bridge+connector, with the
> registers that need to be set in modeset handled by the bridge.  This
> would remove the 'msm_connector' base class.  But some things need to be
> double checked to make sure I could get the correct ON/OFF sequencing..
>
> Signed-off-by: Rob Clark <robdclark@gmail.com>

> diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
> new file mode 100644
> index 0000000..b9e9d03
> --- /dev/null
> +++ b/drivers/gpu/drm/msm/NOTES
> @@ -0,0 +1,43 @@
> +Rough thoughts/notes..
> +
> +We have (at least) 3 different display controller blocks at play:
> + + MDP3 - ?? seems to be what is on geeksphone peak device
> + + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
> + + MDSS - snapdragon 800
> +
> +(I don't have a completely clear picture on which display controller
> +is in which devices)
> +
> +But, HDMI/DSI/etc blocks seem like they can be shared.  And I for sure
> +don't want to have to deal with N different kms devices from
> +xf86-video-freedreno.  Plus, it seems like we can do some clever tricks
> +like have kms/crtc code build up gpu cmdstream to update scanout after
> +rendering without involving the cpu.
> +
> +And on gpu side of things:
> + + zero, one, or two 2d cores (z180)

Life would be easier if we just forgot that z180 existed.

> + + and either a2xx or a3xx 3d core.

A2XX will probably be less interesting to everybody except folks trying to
get their ancient phones working.  That said, it might be smart to keep the
GPU sub-device split anyway, with future hardware in mind.

> +
> +So, one drm driver, with some modularity.  Different 'struct msm_kms'
> +implementations, depending on display controller.  And one or more
> +'struct msm_gpu' for the various different gpu sub-modules.

If Z180 goes poof then we could conceivably use 'adreno' for a name which
is a nice way to compartmentalize the GPU code.  On the other hand msm_gpu
has consistency going for it.

> +The kms module provides the plane, crtc, and encoder objects, and
> +loads whatever connectors are appropriate.
> +
> +For MDP4, the mapping is (I think):
> +
> +  plane   -> PIPE{RGBn,VGn}              \
> +  crtc    -> OVLP{n} + DMA{P,S,E} (??)   |-> MDP "device"
> +  encoder -> DTV/LCDC/DSI (within MDP4)  /
> +  connector -> HDMI/DSI/etc              --> other device(s)
> +
> +Since the irq's that drm core mostly cares about are vblank/framedone,
> +we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
> +and treat the MDP4 block's irq as "the" irq.  Even though the connectors
> +may have their own irqs which they install themselves.  For this reason
> +the display controller is the "master" device.
> +
> +Each connector probably ends up being a separate device, just for the
> +logistics of finding/mapping io region, irq, etc.
> +

> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
> new file mode 100644
> index 0000000..e6ccef9
> --- /dev/null
> +++ b/drivers/gpu/drm/msm/msm_drv.c
> @@ -0,0 +1,491 @@
> +/*
> + * Copyright (C) 2013 Red Hat
> + * Author: Rob Clark <robdclark@gmail.com>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include "msm_drv.h"
> +
> +static void msm_fb_output_poll_changed(struct drm_device *dev)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	if (priv->fbdev)
> +		drm_fb_helper_hotplug_event(priv->fbdev);
> +}
> +
> +static const struct drm_mode_config_funcs mode_config_funcs = {
> +	.fb_create = msm_framebuffer_create,
> +	.output_poll_changed = msm_fb_output_poll_changed,
> +};
> +
> +static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
> +		unsigned long iova, int flags)
> +{
> +	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
> +	return 0;
> +}
> +
> +int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	int idx = priv->num_iommus++;
> +
> +	if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
> +		return -EINVAL;
> +
> +	priv->iommus[idx] = iommu;
> +
> +	iommu_set_fault_handler(iommu, msm_fault_handler);
> +
> +	/* need to iommu_attach_device() somewhere??  on resume?? */

We are going to end up with 2 IOMMUs to deal with.

> +	return idx;
> +}
> +
> +#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
> +static bool reglog = false;
> +MODULE_PARM_DESC(reglog, "Enable register read/write logging");
> +module_param(reglog, bool, 0600);
> +#else
> +#define reglog 0
> +#endif
> +
> +void __iomem *msm_ioremap(struct device *dev, resource_size_t offset,
> +		unsigned long size, const char *name)
> +{
> +	void __iomem *ptr = devm_ioremap_nocache(dev, offset, size);
> +	if (reglog)
> +		printk(KERN_DEBUG "IO:region %s %08x %08lx\n", name, (u32)ptr, size);
> +	return ptr;
> +}
> +
> +void msm_writel(u32 data, void __iomem *addr)
> +{
> +	if (reglog)
> +		printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
> +	writel(data, addr);
> +}
> +
> +u32 msm_readl(const void __iomem *addr)
> +{
> +	u32 val = readl(addr);
> +	if (reglog)
> +		printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
> +	return val;
> +}
> +
> +/*
> + * DRM operations:
> + */
> +
> +static int msm_unload(struct drm_device *dev)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +
> +	drm_kms_helper_poll_fini(dev);
> +	drm_mode_config_cleanup(dev);
> +	drm_vblank_cleanup(dev);
> +
> +	pm_runtime_get_sync(dev->dev);
> +	drm_irq_uninstall(dev);
> +	pm_runtime_put_sync(dev->dev);
> +
> +	flush_workqueue(priv->wq);
> +	destroy_workqueue(priv->wq);
> +
> +	if (kms) {
> +		pm_runtime_disable(dev->dev);
> +		kms->funcs->destroy(kms);
> +	}
> +
> +	dev->dev_private = NULL;
> +
> +	pm_runtime_disable(dev->dev);
> +
> +	kfree(priv);
> +
> +	return 0;
> +}
> +
> +static int msm_load(struct drm_device *dev, unsigned long flags)
> +{
> +	struct platform_device *pdev = dev->platformdev;
> +	struct msm_drm_private *priv;
> +	struct msm_kms *kms;
> +	int ret;
> +
> +	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
> +	if (!priv) {
> +		dev_err(dev->dev, "failed to allocate private data\n");
> +		return -ENOMEM;
> +	}
> +
> +	dev->dev_private = priv;
> +
> +	priv->wq = alloc_ordered_workqueue("msm", 0);
> +
> +	INIT_LIST_HEAD(&priv->obj_list);
> +
> +	drm_mode_config_init(dev);
> +
> +	kms = mdp4_kms_init(dev);
> +	if (IS_ERR(kms)) {
> +		/*
> +		 * NOTE: once we have GPU support, having no kms should not
> +		 * be considered fatal.. ideally we would still support gpu
> +		 * and (for example) use dmabuf/prime to share buffers with
> +		 * imx drm driver on iMX5
> +		 */
> +		dev_err(dev->dev, "failed to load kms\n");
> +		ret = PTR_ERR(kms);
> +		goto fail;
> +	}
> +
> +	priv->kms = kms;
> +
> +	if (kms) {
> +		pm_runtime_enable(dev->dev);
> +		ret = kms->funcs->hw_init(kms);
> +		if (ret) {
> +			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
> +			goto fail;
> +		}
> +	}
> +
> +	dev->mode_config.min_width = 0;
> +	dev->mode_config.min_height = 0;
> +	dev->mode_config.max_width = 2048;
> +	dev->mode_config.max_height = 2048;
> +	dev->mode_config.funcs = &mode_config_funcs;
> +
> +	ret = drm_vblank_init(dev, 1);
> +	if (ret < 0) {
> +		dev_err(dev->dev, "failed to initialize vblank\n");
> +		goto fail;
> +	}
> +
> +	pm_runtime_get_sync(dev->dev);
> +	ret = drm_irq_install(dev);
> +	pm_runtime_put_sync(dev->dev);
> +	if (ret < 0) {
> +		dev_err(dev->dev, "failed to install IRQ handler\n");
> +		goto fail;
> +	}
> +
> +	platform_set_drvdata(pdev, dev);
> +
> +#ifdef CONFIG_DRM_MSM_FBDEV
> +	priv->fbdev = msm_fbdev_init(dev);
> +#endif
> +
> +	drm_kms_helper_poll_init(dev);
> +
> +	return 0;
> +
> +fail:
> +	msm_unload(dev);
> +	return ret;
> +}
> +
> +static void msm_preclose(struct drm_device *dev, struct drm_file *file)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +	if (kms)
> +		kms->funcs->preclose(kms, file);
> +}
> +
> +static void msm_lastclose(struct drm_device *dev)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	if (priv->fbdev) {
> +		drm_modeset_lock_all(dev);
> +		drm_fb_helper_restore_fbdev_mode(priv->fbdev);
> +		drm_modeset_unlock_all(dev);
> +	}
> +}
> +
> +static irqreturn_t msm_irq(DRM_IRQ_ARGS)
> +{
> +	struct drm_device *dev = arg;
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +	BUG_ON(!kms);
> +	return kms->funcs->irq(kms);

And we will have separate interrupts too - has anybody else had to
deal with that (too lazy to check).

> +}
> +
> +static void msm_irq_preinstall(struct drm_device *dev)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +	BUG_ON(!kms);
> +	kms->funcs->irq_preinstall(kms);
> +}
> +
> +static int msm_irq_postinstall(struct drm_device *dev)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +	BUG_ON(!kms);
> +	return kms->funcs->irq_postinstall(kms);
> +}
> +
> +static void msm_irq_uninstall(struct drm_device *dev)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +	BUG_ON(!kms);
> +	kms->funcs->irq_uninstall(kms);
> +}
> +
> +static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +	if (!kms)
> +		return -ENXIO;
> +	DBG("dev=%p, crtc=%d", dev, crtc_id);
> +	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
> +}
> +
> +static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +	if (!kms)
> +		return;
> +	DBG("dev=%p, crtc=%d", dev, crtc_id);
> +	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
> +}
> +
> +#ifdef CONFIG_DEBUG_FS
> +static int msm_gem_show(struct seq_file *m, void *arg)
> +{
> +	struct drm_info_node *node = (struct drm_info_node *) m->private;
> +	struct drm_device *dev = node->minor->dev;
> +	struct msm_drm_private *priv = dev->dev_private;
> +	int ret;
> +
> +	ret = mutex_lock_interruptible(&dev->struct_mutex);
> +	if (ret)
> +		return ret;
> +
> +	seq_printf(m, "All Objects:\n");
> +	msm_gem_describe_objects(&priv->obj_list, m);
> +
> +	mutex_unlock(&dev->struct_mutex);
> +
> +	return 0;
> +}
> +
> +static int msm_mm_show(struct seq_file *m, void *arg)
> +{
> +	struct drm_info_node *node = (struct drm_info_node *) m->private;
> +	struct drm_device *dev = node->minor->dev;
> +	return drm_mm_dump_table(m, dev->mm_private);
> +}
> +
> +static int msm_fb_show(struct seq_file *m, void *arg)
> +{
> +	struct drm_info_node *node = (struct drm_info_node *) m->private;
> +	struct drm_device *dev = node->minor->dev;
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct drm_framebuffer *fb, *fbdev_fb = NULL;
> +
> +	if (priv->fbdev) {
> +		seq_printf(m, "fbcon ");
> +		fbdev_fb = priv->fbdev->fb;
> +		msm_framebuffer_describe(fbdev_fb, m);
> +	}
> +
> +	mutex_lock(&dev->mode_config.fb_lock);
> +	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
> +		if (fb == fbdev_fb)
> +			continue;
> +
> +		seq_printf(m, "user ");
> +		msm_framebuffer_describe(fb, m);
> +	}
> +	mutex_unlock(&dev->mode_config.fb_lock);
> +
> +	return 0;
> +}
> +
> +static struct drm_info_list msm_debugfs_list[] = {
> +		{"gem", msm_gem_show, 0},
> +		{ "mm", msm_mm_show,   0 },
> +		{ "fb", msm_fb_show, 0 },
> +};
> +
> +static int msm_debugfs_init(struct drm_minor *minor)
> +{
> +	struct drm_device *dev = minor->dev;
> +	int ret;
> +
> +	ret = drm_debugfs_create_files(msm_debugfs_list,
> +			ARRAY_SIZE(msm_debugfs_list),
> +			minor->debugfs_root, minor);
> +
> +	if (ret) {
> +		dev_err(dev->dev, "could not install msm_debugfs_list\n");
> +		return ret;
> +	}
> +
> +	return ret;
> +}
> +
> +static void msm_debugfs_cleanup(struct drm_minor *minor)
> +{
> +	drm_debugfs_remove_files(msm_debugfs_list,
> +			ARRAY_SIZE(msm_debugfs_list), minor);
> +}
> +#endif
> +
> +static const struct vm_operations_struct vm_ops = {
> +	.fault = msm_gem_fault,
> +	.open = drm_gem_vm_open,
> +	.close = drm_gem_vm_close,
> +};
> +
> +static const struct file_operations fops = {
> +	.owner              = THIS_MODULE,
> +	.open               = drm_open,
> +	.release            = drm_release,
> +	.unlocked_ioctl     = drm_ioctl,
> +#ifdef CONFIG_COMPAT
> +	.compat_ioctl       = drm_compat_ioctl,
> +#endif
> +	.poll               = drm_poll,
> +	.read               = drm_read,
> +	.fasync             = drm_fasync,
> +	.llseek             = no_llseek,
> +	.mmap               = msm_gem_mmap,
> +};
> +
> +static struct drm_driver msm_driver = {
> +	.driver_features    = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
> +	.load               = msm_load,
> +	.unload             = msm_unload,
> +	.preclose           = msm_preclose,
> +	.lastclose          = msm_lastclose,
> +	.irq_handler        = msm_irq,
> +	.irq_preinstall     = msm_irq_preinstall,
> +	.irq_postinstall    = msm_irq_postinstall,
> +	.irq_uninstall      = msm_irq_uninstall,
> +	.get_vblank_counter = drm_vblank_count,
> +	.enable_vblank      = msm_enable_vblank,
> +	.disable_vblank     = msm_disable_vblank,
> +	.gem_free_object    = msm_gem_free_object,
> +	.gem_vm_ops         = &vm_ops,
> +	.dumb_create        = msm_gem_dumb_create,
> +	.dumb_map_offset    = msm_gem_dumb_map_offset,
> +	.dumb_destroy       = msm_gem_dumb_destroy,
> +#ifdef CONFIG_DEBUG_FS
> +	.debugfs_init       = msm_debugfs_init,
> +	.debugfs_cleanup    = msm_debugfs_cleanup,
> +#endif
> +	.fops               = &fops,
> +	.name               = "msm",
> +	.desc               = "MSM Snapdragon DRM",
> +	.date               = "20130625",
> +	.major              = 1,
> +	.minor              = 0,
> +};
> +
> +#ifdef CONFIG_PM_SLEEP
> +static int msm_pm_suspend(struct device *dev)
> +{
> +	struct drm_device *ddev = dev_get_drvdata(dev);
> +	struct msm_drm_private *priv = ddev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +
> +	drm_kms_helper_poll_disable(ddev);
> +
> +	return kms->funcs->pm_suspend(kms);
> +}
> +
> +static int msm_pm_resume(struct device *dev)
> +{
> +	struct drm_device *ddev = dev_get_drvdata(dev);
> +	struct msm_drm_private *priv = ddev->dev_private;
> +	struct msm_kms *kms = priv->kms;
> +	int ret = 0;
> +
> +	ret = kms->funcs->pm_resume(kms);
> +	if (ret)
> +		return ret;
> +
> +	drm_kms_helper_poll_enable(ddev);
> +
> +	return 0;
> +}
> +#endif
> +
> +static const struct dev_pm_ops msm_pm_ops = {
> +	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
> +};
> +
> +/*
> + * Platform driver:
> + */
> +
> +static int msm_pdev_probe(struct platform_device *pdev)
> +{
> +	return drm_platform_init(&msm_driver, pdev);
> +}
> +
> +static int msm_pdev_remove(struct platform_device *pdev)
> +{
> +	drm_platform_exit(&msm_driver, pdev);
> +
> +	return 0;
> +}
> +
> +static const struct platform_device_id msm_id[] = {
> +	{ "mdp", 0 },
> +	{ }
> +};
> +
> +static struct platform_driver msm_platform_driver = {
> +	.probe      = msm_pdev_probe,
> +	.remove     = msm_pdev_remove,
> +	.driver     = {
> +		.owner  = THIS_MODULE,
> +		.name   = "msm",
> +		.pm     = &msm_pm_ops,
> +	},
> +	.id_table   = msm_id,
> +};
> +
> +static int __init msm_drm_init(void)
> +{
> +	DBG("init");
> +	hdmi_init();
> +	return platform_driver_register(&msm_platform_driver);
> +}
> +
> +static void __exit msm_drm_fini(void)
> +{
> +	DBG("fini");
> +	platform_driver_unregister(&msm_platform_driver);
> +	hdmi_fini();
> +}
> +
> +module_init(msm_drm_init);
> +module_exit(msm_drm_fini);
> +
> +MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
> +MODULE_DESCRIPTION("MSM DRM Driver");
> +MODULE_LICENSE("GPL");

> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
> new file mode 100644
> index 0000000..a996490
> --- /dev/null
> +++ b/drivers/gpu/drm/msm/msm_gem.c
> @@ -0,0 +1,441 @@
> +/*
> + * Copyright (C) 2013 Red Hat
> + * Author: Rob Clark <robdclark@gmail.com>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/spinlock.h>
> +#include <linux/shmem_fs.h>
> +
> +#include "msm_drv.h"
> +
> +struct msm_gem_object {
> +	struct drm_gem_object base;
> +
> +	struct list_head mm_list;
> +
> +	uint32_t flags;
> +	struct page **pages;
> +	struct sg_table *sgt;
> +	void *vaddr;
> +
> +	struct {
> +		// XXX
> +		uint32_t iova;
> +	} domain[NUM_DOMAINS];
> +};
> +#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
> +
> +/* called with dev->struct_mutex held */
> +/* TODO move this into drm_gem.c */
> +static struct page **attach_pages(struct drm_gem_object *obj)
> +{
> +	struct inode *inode;
> +	struct address_space *mapping;
> +	struct page *p, **pages;
> +	int i, npages;
> +
> +	/* This is the shared memory object that backs the GEM resource */
> +	inode = file_inode(obj->filp);
> +	mapping = inode->i_mapping;
> +
> +	npages = obj->size >> PAGE_SHIFT;
> +
> +	pages = drm_malloc_ab(npages, sizeof(struct page *));
> +	if (pages == NULL)
> +		return ERR_PTR(-ENOMEM);
> +
> +	for (i = 0; i < npages; i++) {
> +		p = shmem_read_mapping_page(mapping, i);
> +		if (IS_ERR(p))
> +			goto fail;
> +		pages[i] = p;
> +	}
> +
> +	return pages;
> +
> +fail:
> +	while (i--)
> +		page_cache_release(pages[i]);
> +
> +	drm_free_large(pages);
> +	return ERR_CAST(p);
> +}
> +
> +static void detach_pages(struct drm_gem_object *obj, struct page **pages)
> +{
> +	int i, npages;
> +
> +	npages = obj->size >> PAGE_SHIFT;
> +
> +	for (i = 0; i < npages; i++) {
> +		set_page_dirty(pages[i]);
> +
> +		/* Undo the reference we took when populating the table */
> +		page_cache_release(pages[i]);
> +	}
> +
> +	drm_free_large(pages);
> +}
> +
> +
> +/* called with dev->struct_mutex held */
> +static struct page **get_pages(struct drm_gem_object *obj)
> +{
> +	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +
> +	if (!msm_obj->pages) {
> +		struct page **p = attach_pages(obj);
> +		int npages = obj->size >> PAGE_SHIFT;
> +
> +		if (IS_ERR(p)) {
> +			dev_err(obj->dev->dev, "could not get pages: %ld\n",
> +					PTR_ERR(p));
> +			return p;
> +		}
> +		msm_obj->pages = p;
> +		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
> +	}
> +
> +	return msm_obj->pages;
> +}
> +
> +static void put_pages(struct drm_gem_object *obj)
> +{
> +	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +
> +	if (msm_obj->pages) {
> +		if (msm_obj->sgt) {
> +			sg_free_table(msm_obj->sgt);
> +			kfree(msm_obj->sgt);
> +		}
> +		detach_pages(obj, msm_obj->pages);
> +		msm_obj->pages = NULL;
> +	}
> +}
> +
> +int msm_gem_mmap_obj(struct drm_gem_object *obj,
> +		struct vm_area_struct *vma)
> +{
> +	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +
> +	vma->vm_flags &= ~VM_PFNMAP;
> +	vma->vm_flags |= VM_MIXEDMAP;
> +
> +	if (msm_obj->flags & MSM_BO_WC) {
> +		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> +	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
> +		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
> +	} else {
> +		/*
> +		 * Shunt off cached objs to shmem file so they have their own
> +		 * address_space (so unmap_mapping_range does what we want,
> +		 * in particular in the case of mmap'd dmabufs)
> +		 */
> +		fput(vma->vm_file);
> +		get_file(obj->filp);
> +		vma->vm_pgoff = 0;
> +		vma->vm_file  = obj->filp;
> +
> +		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
> +	}
> +
> +	return 0;
> +}
> +
> +int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +	int ret;
> +
> +	ret = drm_gem_mmap(filp, vma);
> +	if (ret) {
> +		DBG("mmap failed: %d", ret);
> +		return ret;
> +	}
> +
> +	return msm_gem_mmap_obj(vma->vm_private_data, vma);
> +}
> +
> +int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
> +{
> +	struct drm_gem_object *obj = vma->vm_private_data;
> +	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +	struct drm_device *dev = obj->dev;
> +	struct page **pages;
> +	unsigned long pfn;
> +	pgoff_t pgoff;
> +	int ret;
> +
> +	/* Make sure we don't parallel update on a fault, nor move or remove
> +	 * something from beneath our feet
> +	 */
> +	mutex_lock(&dev->struct_mutex);
> +
> +	/* make sure we have pages attached now */
> +	pages = get_pages(obj);
> +	if (IS_ERR(pages)) {
> +		ret = PTR_ERR(pages);
> +		goto out;
> +	}
> +
> +	/* We don't use vmf->pgoff since that has the fake offset: */
> +	pgoff = ((unsigned long)vmf->virtual_address -
> +			vma->vm_start) >> PAGE_SHIFT;
> +
> +	pfn = page_to_pfn(msm_obj->pages[pgoff]);
> +
> +	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
> +			pfn, pfn << PAGE_SHIFT);
> +
> +	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
> +
> +out:
> +	mutex_unlock(&dev->struct_mutex);
> +	switch (ret) {
> +	case 0:
> +	case -ERESTARTSYS:
> +	case -EINTR:
> +		return VM_FAULT_NOPAGE;
> +	case -ENOMEM:
> +		return VM_FAULT_OOM;
> +	default:
> +		return VM_FAULT_SIGBUS;
> +	}
> +}
> +
> +/** get mmap offset */
> +static uint64_t mmap_offset(struct drm_gem_object *obj)
> +{
> +	struct drm_device *dev = obj->dev;
> +
> +	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
> +
> +	if (!obj->map_list.map) {
> +		/* Make it mmapable */
> +		int ret = drm_gem_create_mmap_offset(obj);
> +
> +		if (ret) {
> +			dev_err(dev->dev, "could not allocate mmap offset\n");
> +			return 0;
> +		}
> +	}
> +
> +	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
> +}
> +
> +uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
> +{
> +	uint64_t offset;
> +	mutex_lock(&obj->dev->struct_mutex);
> +	offset = mmap_offset(obj);
> +	mutex_unlock(&obj->dev->struct_mutex);
> +	return offset;
> +}
> +
> +int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
> +{
> +	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +	int ret = 0;
> +
> +	mutex_lock(&obj->dev->struct_mutex);
> +	if (!msm_obj->domain[id].iova) {
> +		struct msm_drm_private *priv = obj->dev->dev_private;
> +		uint32_t offset = (uint32_t)mmap_offset(obj);
> +		get_pages(obj);
> +		ret = iommu_map_range(priv->iommus[id], offset,
> +				msm_obj->sgt->sgl, obj->size, IOMMU_READ);
> +		msm_obj->domain[id].iova = offset;
> +	}
> +	mutex_unlock(&obj->dev->struct_mutex);
> +
> +	if (!ret)
> +		*iova = msm_obj->domain[id].iova;
> +
> +	return ret;
> +}
> +
> +void msm_gem_put_iova(struct drm_gem_object *obj, int id)
> +{
> +}
> +
> +int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
> +		struct drm_mode_create_dumb *args)
> +{
> +	args->pitch = align_pitch(args->width, args->bpp);
> +	args->size  = PAGE_ALIGN(args->pitch * args->height);
> +	return msm_gem_new_handle(dev, file, args->size,
> +			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
> +}
> +
> +int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
> +		uint32_t handle)
> +{
> +	/* No special work needed, drop the reference and see what falls out */
> +	return drm_gem_handle_delete(file, handle);
> +}
> +
> +int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
> +		uint32_t handle, uint64_t *offset)
> +{
> +	struct drm_gem_object *obj;
> +	int ret = 0;
> +
> +	/* GEM does all our handle to object mapping */
> +	obj = drm_gem_object_lookup(dev, file, handle);
> +	if (obj == NULL) {
> +		ret = -ENOENT;
> +		goto fail;
> +	}
> +
> +	*offset = msm_gem_mmap_offset(obj);
> +
> +	drm_gem_object_unreference_unlocked(obj);
> +
> +fail:
> +	return ret;
> +}
> +
> +void *msm_gem_vaddr(struct drm_gem_object *obj)
> +{
> +	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
> +	if (!msm_obj->vaddr) {
> +		struct page **pages = get_pages(obj);
> +		if (IS_ERR(pages))
> +			return ERR_CAST(pages);
> +		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
> +				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
> +	}
> +	return msm_obj->vaddr;
> +}
> +
> +#ifdef CONFIG_DEBUG_FS
> +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
> +{
> +	struct drm_device *dev = obj->dev;
> +	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +	uint64_t off = 0;
> +
> +	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
> +
> +	if (obj->map_list.map)
> +		off = (uint64_t)obj->map_list.hash.key;
> +
> +	seq_printf(m, "%08x: %2d (%2d) %08llx %p %d\n",
> +			msm_obj->flags, obj->name, obj->refcount.refcount.counter,
> +			off, msm_obj->vaddr, obj->size);
> +}
> +
> +void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
> +{
> +	struct msm_gem_object *msm_obj;
> +	int count = 0;
> +	size_t size = 0;
> +
> +	list_for_each_entry(msm_obj, list, mm_list) {
> +		struct drm_gem_object *obj = &msm_obj->base;
> +		seq_printf(m, "   ");
> +		msm_gem_describe(obj, m);
> +		count++;
> +		size += obj->size;
> +	}
> +
> +	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
> +}
> +#endif
> +
> +void msm_gem_free_object(struct drm_gem_object *obj)
> +{
> +	struct drm_device *dev = obj->dev;
> +	struct msm_gem_object *msm_obj = to_msm_bo(obj);
> +	int id;
> +
> +	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
> +
> +	list_del(&msm_obj->mm_list);
> +
> +	if (obj->map_list.map)
> +		drm_gem_free_mmap_offset(obj);
> +
> +	if (msm_obj->vaddr)
> +		vunmap(msm_obj->vaddr);
> +
> +	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
> +		if (msm_obj->domain[id].iova) {
> +			struct msm_drm_private *priv = obj->dev->dev_private;
> +			uint32_t offset = (uint32_t)mmap_offset(obj);
> +			iommu_unmap_range(priv->iommus[id], offset, obj->size);
> +		}
> +	}
> +
> +	put_pages(obj);
> +
> +	drm_gem_object_release(obj);
> +
> +	kfree(obj);
> +}
> +
> +/* convenience method to construct a GEM buffer object, and userspace handle */
> +int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
> +		uint32_t size, uint32_t flags, uint32_t *handle)
> +{
> +	struct drm_gem_object *obj;
> +	int ret;
> +
> +	obj = msm_gem_new(dev, size, flags);
> +	if (!obj)
> +		return -ENOMEM;
> +
> +	ret = drm_gem_handle_create(file, obj, handle);
> +
> +	/* drop reference from allocate - handle holds it now */
> +	drm_gem_object_unreference_unlocked(obj);
> +
> +	return ret;
> +}
> +
> +struct drm_gem_object *msm_gem_new(struct drm_device *dev,
> +		uint32_t size, uint32_t flags)
> +{
> +	struct msm_drm_private *priv = dev->dev_private;
> +	struct msm_gem_object *msm_obj;
> +	struct drm_gem_object *obj = NULL;
> +	int ret;
> +
> +	size = PAGE_ALIGN(size);
> +
> +	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
> +	if (!msm_obj)
> +		goto fail;
> +
> +	obj = &msm_obj->base;
> +
> +	ret = drm_gem_object_init(dev, obj, size);
> +	if (ret)
> +		goto fail;
> +
> +	msm_obj->flags = flags;
> +
> +	mutex_lock(&obj->dev->struct_mutex);
> +	list_add(&msm_obj->mm_list, &priv->obj_list);
> +	mutex_unlock(&obj->dev->struct_mutex);
> +
> +	return obj;
> +
> +fail:
> +	if (obj)
> +		drm_gem_object_unreference_unlocked(obj);
> +
> +	return NULL;
> +}
>

Yay GEM.  No complaints here.

Jordan
Rob Clark July 9, 2013, 5:15 a.m. UTC | #2
On Mon, Jul 8, 2013 at 7:17 PM, Jordan Crouse <jcrouse@codeaurora.org> wrote:
> On 07/05/2013 01:53 PM, Rob Clark wrote:
>>
>> The snapdragon chips have multiple different display controllers,
>> depending on which chip variant/version.  (As far as I can tell, current
>> devices have either MDP3 or MDP4, and upcoming devices have MDSS.)  And
>> then external to the display controller are HDMI, DSI, etc. blocks which
>> may be shared across devices which have different display controller
>> blocks.
>>
>> To more easily add support for different display controller blocks, the
>> display controller specific bits are split out into a "kms" object,
>> which provides the kms plane/crtc/encoder objects.
>>
>> The external HDMI, DSI, etc. blocks are part encoder, and part connector
>> currently.  But I think I will pull in the drm_bridge patches from
>> chromeos tree, and split them into a bridge+connector, with the
>> registers that need to be set in modeset handled by the bridge.  This
>> would remove the 'msm_connector' base class.  But some things need to be
>> double checked to make sure I could get the correct ON/OFF sequencing..
>>
>> Signed-off-by: Rob Clark <robdclark@gmail.com>
>
>
>> diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
>> new file mode 100644
>> index 0000000..b9e9d03
>> --- /dev/null
>> +++ b/drivers/gpu/drm/msm/NOTES
>> @@ -0,0 +1,43 @@
>> +Rough thoughts/notes..
>> +
>> +We have (at least) 3 different display controller blocks at play:
>> + + MDP3 - ?? seems to be what is on geeksphone peak device
>> + + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
>> + + MDSS - snapdragon 800
>> +
>> +(I don't have a completely clear picture on which display controller
>> +is in which devices)
>> +
>> +But, HDMI/DSI/etc blocks seem like they can be shared.  And I for sure
>> +don't want to have to deal with N different kms devices from
>> +xf86-video-freedreno.  Plus, it seems like we can do some clever tricks
>> +like have kms/crtc code build up gpu cmdstream to update scanout after
>> +rendering without involving the cpu.
>> +
>> +And on gpu side of things:
>> + + zero, one, or two 2d cores (z180)
>
>
> Life would be easier if we just forgot that z180 existed.
>

I would like to support it eventually, although not the highest
priority.  Although I'm not quite sure yet about how to do a sane
kernel interface for it.. I might just take the easy way out and
memcpy.  Regarding extra level of indirection, well it doesn't
absolutely *have* to be the same ioctl..  I do need to give it some
thought though.

>
>> + + and either a2xx or a3xx 3d core.
>
>
> A2XX will probably be less interesting to everybody except folks trying to
> get their ancient phones working.  That said, it might be smart to keep the
> GPU sub-device split anyway, with future hardware in mind.
>

I would like to support a2xx as well, if for no other reason than that
I have a handful of a2xx devices.  (Although sometimes there is a
shortage of hours in the day.)

>
>> +
>> +So, one drm driver, with some modularity.  Different 'struct msm_kms'
>> +implementations, depending on display controller.  And one or more
>> +'struct msm_gpu' for the various different gpu sub-modules.
>
>
> If Z180 goes poof then we could conceivably use 'adreno' for a name which
> is a nice way to compartmentalize the GPU code.  On the other hand msm_gpu
> has consistency going for it.
>

I suppose depending on what marketing literature you read, "adreno"
could refer collectively to 2d and 3d cores.  But meh.  I could go
either way on the name.

>
>> +The kms module provides the plane, crtc, and encoder objects, and
>> +loads whatever connectors are appropriate.
>> +
>> +For MDP4, the mapping is (I think):
>> +
>> +  plane   -> PIPE{RGBn,VGn}              \
>> +  crtc    -> OVLP{n} + DMA{P,S,E} (??)   |-> MDP "device"
>> +  encoder -> DTV/LCDC/DSI (within MDP4)  /
>> +  connector -> HDMI/DSI/etc              --> other device(s)
>> +
>> +Since the irq's that drm core mostly cares about are vblank/framedone,
>> +we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
>> +and treat the MDP4 block's irq as "the" irq.  Even though the connectors
>> +may have their own irqs which they install themselves.  For this reason
>> +the display controller is the "master" device.
>> +
>> +Each connector probably ends up being a separate device, just for the
>> +logistics of finding/mapping io region, irq, etc.
>> +
>
>
>> diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
>> new file mode 100644
>> index 0000000..e6ccef9
>> --- /dev/null
>> +++ b/drivers/gpu/drm/msm/msm_drv.c
>> @@ -0,0 +1,491 @@
>>
>> +/*
>> + * Copyright (C) 2013 Red Hat
>> + * Author: Rob Clark <robdclark@gmail.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> it
>> + * under the terms of the GNU General Public License version 2 as
>> published by
>> + * the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful, but
>> WITHOUT
>> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
>> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
>> for
>> + * more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> along with
>> + * this program.  If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include "msm_drv.h"
>> +
>> +static void msm_fb_output_poll_changed(struct drm_device *dev)
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       if (priv->fbdev)
>> +               drm_fb_helper_hotplug_event(priv->fbdev);
>> +}
>> +
>> +static const struct drm_mode_config_funcs mode_config_funcs = {
>> +       .fb_create = msm_framebuffer_create,
>> +       .output_poll_changed = msm_fb_output_poll_changed,
>> +};
>> +
>> +static int msm_fault_handler(struct iommu_domain *iommu, struct device
>> *dev,
>> +               unsigned long iova, int flags)
>> +{
>> +       DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
>>
>> +       return 0;
>> +}
>> +
>> +int msm_register_iommu(struct drm_device *dev, struct iommu_domain
>> *iommu)
>>
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       int idx = priv->num_iommus++;
>> +
>> +       if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
>> +               return -EINVAL;
>> +
>> +       priv->iommus[idx] = iommu;
>> +
>> +       iommu_set_fault_handler(iommu, msm_fault_handler);
>> +
>> +       /* need to iommu_attach_device() somewhere??  on resume?? */
>
>
> We are going to end up with 2 IOMMUs to deal with.

Oh, yeah, I did figure out the attach stuff eventually, but forgot to
remove that note to myself.

The rough plan is that different initiators (display, gpu, etc) request
an iova in a particular device address space (msm_gem_get_iova()), and
the gem object keeps track of the device address in each domain in which
it is mapped.

Well, I'm still thinking about the best way to deal with per-context
address space for the GPU.  One easy way is to just use the same address
space in each context (with only the buffers shared to that context
actually being mapped).  That should work ok-ish, at least for newer
GPUs with a large address space.  But I'm not super concerned about
getting that part right up-front, because it won't be visible in the
user<->kernel ABI, so it is something that can be changed later.
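
In code, that flow would look roughly like this (a sketch built on the
msm_register_iommu()/msm_gem_get_iova() signatures from this patch; the
surrounding function and names are made up for illustration):

  static int example_map_scanout(struct drm_device *dev,
                  struct iommu_domain *domain, struct drm_gem_object *bo)
  {
          uint32_t iova;
          int id, ret;

          /* at init time: register the display's iommu domain with the
           * core driver and remember the id it gets assigned:
           */
          id = msm_register_iommu(dev, domain);
          if (id < 0)
                  return id;

          /* later: lazily create (or look up) this bo's mapping in that
           * domain and get back the device address:
           */
          ret = msm_gem_get_iova(bo, id, &iova);
          if (ret)
                  return ret;

          /* ... program 'iova' into the scanout (or gpu) registers ... */
          return 0;
  }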

>
>> +       return idx;
>> +}
>> +
>> +#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
>> +static bool reglog = false;
>> +MODULE_PARM_DESC(reglog, "Enable register read/write logging");
>> +module_param(reglog, bool, 0600);
>> +#else
>> +#define reglog 0
>> +#endif
>> +
>> +void __iomem *msm_ioremap(struct device *dev, resource_size_t offset,
>> +               unsigned long size, const char *name)
>> +{
>> +       void __iomem *ptr = devm_ioremap_nocache(dev, offset, size);
>> +       if (reglog)
>> +               printk(KERN_DEBUG "IO:region %s %08x %08lx\n", name,
>> (u32)ptr, size);
>> +       return ptr;
>> +}
>> +
>> +void msm_writel(u32 data, void __iomem *addr)
>> +{
>> +       if (reglog)
>> +               printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
>> +       writel(data, addr);
>> +}
>> +
>> +u32 msm_readl(const void __iomem *addr)
>> +{
>> +       u32 val = readl(addr);
>> +       if (reglog)
>> +               printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
>> +       return val;
>> +}
>> +
>> +/*
>> + * DRM operations:
>> + */
>> +
>> +static int msm_unload(struct drm_device *dev)
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +
>> +       drm_kms_helper_poll_fini(dev);
>> +       drm_mode_config_cleanup(dev);
>> +       drm_vblank_cleanup(dev);
>> +
>> +       pm_runtime_get_sync(dev->dev);
>> +       drm_irq_uninstall(dev);
>> +       pm_runtime_put_sync(dev->dev);
>> +
>> +       flush_workqueue(priv->wq);
>> +       destroy_workqueue(priv->wq);
>> +
>> +       if (kms) {
>> +               pm_runtime_disable(dev->dev);
>> +               kms->funcs->destroy(kms);
>> +       }
>> +
>> +       dev->dev_private = NULL;
>> +
>> +       pm_runtime_disable(dev->dev);
>> +
>> +       kfree(priv);
>>
>> +
>> +       return 0;
>> +}
>> +
>> +static int msm_load(struct drm_device *dev, unsigned long flags)
>>
>> +{
>> +       struct platform_device *pdev = dev->platformdev;
>> +       struct msm_drm_private *priv;
>> +       struct msm_kms *kms;
>> +       int ret;
>> +
>> +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
>> +       if (!priv) {
>> +               dev_err(dev->dev, "failed to allocate private data\n");
>> +               return -ENOMEM;
>> +       }
>> +
>> +       dev->dev_private = priv;
>> +
>> +       priv->wq = alloc_ordered_workqueue("msm", 0);
>> +
>> +       INIT_LIST_HEAD(&priv->obj_list);
>> +
>> +       drm_mode_config_init(dev);
>> +
>> +       kms = mdp4_kms_init(dev);
>> +       if (IS_ERR(kms)) {
>> +               /*
>> +                * NOTE: once we have GPU support, having no kms should
>> not
>> +                * be considered fatal.. ideally we would still support
>> gpu
>> +                * and (for example) use dmabuf/prime to share buffers
>> with
>> +                * imx drm driver on iMX5
>> +                */
>> +               dev_err(dev->dev, "failed to load kms\n");
>> +               ret = PTR_ERR(kms);
>>
>> +               goto fail;
>> +       }
>> +
>> +       priv->kms = kms;
>> +
>> +       if (kms) {
>> +               pm_runtime_enable(dev->dev);
>> +               ret = kms->funcs->hw_init(kms);
>> +               if (ret) {
>> +                       dev_err(dev->dev, "kms hw init failed: %d\n",
>> ret);
>>
>> +                       goto fail;
>> +               }
>> +       }
>> +
>> +       dev->mode_config.min_width = 0;
>> +       dev->mode_config.min_height = 0;
>> +       dev->mode_config.max_width = 2048;
>> +       dev->mode_config.max_height = 2048;
>> +       dev->mode_config.funcs = &mode_config_funcs;
>> +
>> +       ret = drm_vblank_init(dev, 1);
>> +       if (ret < 0) {
>> +               dev_err(dev->dev, "failed to initialize vblank\n");
>>
>> +               goto fail;
>> +       }
>> +
>> +       pm_runtime_get_sync(dev->dev);
>> +       ret = drm_irq_install(dev);
>> +       pm_runtime_put_sync(dev->dev);
>> +       if (ret < 0) {
>> +               dev_err(dev->dev, "failed to install IRQ handler\n");
>>
>> +               goto fail;
>> +       }
>> +
>> +       platform_set_drvdata(pdev, dev);
>> +
>> +#ifdef CONFIG_DRM_MSM_FBDEV
>> +       priv->fbdev = msm_fbdev_init(dev);
>> +#endif
>> +
>> +       drm_kms_helper_poll_init(dev);
>>
>> +
>> +       return 0;
>> +
>> +fail:
>> +       msm_unload(dev);
>>
>> +       return ret;
>> +}
>> +
>> +static void msm_preclose(struct drm_device *dev, struct drm_file *file)
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +       if (kms)
>> +               kms->funcs->preclose(kms, file);
>> +}
>> +
>> +static void msm_lastclose(struct drm_device *dev)
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       if (priv->fbdev) {
>> +               drm_modeset_lock_all(dev);
>> +               drm_fb_helper_restore_fbdev_mode(priv->fbdev);
>> +               drm_modeset_unlock_all(dev);
>> +       }
>> +}
>> +
>> +static irqreturn_t msm_irq(DRM_IRQ_ARGS)
>> +{
>> +       struct drm_device *dev = arg;
>>
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +       BUG_ON(!kms);
>> +       return kms->funcs->irq(kms);
>
>
> And we will have separate interrupts too - has anybody else had to
> deal with that (too lazy to check).

IIRC, exynos, and perhaps some others do..

This is actually already the case in the msm kms code, since the HDMI
block has its own irq (for HPD and DDC).  My thinking here is that every
"module" with interrupts that are not vblank related can separately
register its own handler.  It is partly an arbitrary decision, but it
seemed to make sense to me, because the DRM core doesn't really care
too much about interrupts beyond vblank.
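
Concretely, only the MDP4 interrupt is routed through drm_irq_install()
-> msm_irq() -> kms->funcs->irq(); a block like HDMI just requests its
own line for HPD/DDC during its own init, along these lines (the names
and flags here are hypothetical, not the actual hdmi code):

  static irqreturn_t example_hdmi_irq(int irq, void *dev_id)
  {
          /* dispatch HPD/DDC events entirely within the hdmi module */
          return IRQ_HANDLED;
  }

  static int example_hdmi_irq_init(struct platform_device *pdev, void *hdmi)
  {
          int irq = platform_get_irq(pdev, 0);

          if (irq < 0)
                  return irq;

          /* registered independently of the MDP4 irq that drm core sees: */
          return devm_request_irq(&pdev->dev, irq, example_hdmi_irq,
                          0, "hdmi_isr", hdmi);
  }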

BR,
-R

>> +}
>> +
>> +static void msm_irq_preinstall(struct drm_device *dev)
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +       BUG_ON(!kms);
>> +       kms->funcs->irq_preinstall(kms);
>> +}
>> +
>> +static int msm_irq_postinstall(struct drm_device *dev)
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +       BUG_ON(!kms);
>> +       return kms->funcs->irq_postinstall(kms);
>> +}
>> +
>> +static void msm_irq_uninstall(struct drm_device *dev)
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +       BUG_ON(!kms);
>> +       kms->funcs->irq_uninstall(kms);
>> +}
>> +
>> +static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
>>
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +       if (!kms)
>> +               return -ENXIO;
>> +       DBG("dev=%p, crtc=%d", dev, crtc_id);
>> +       return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
>> +}
>> +
>> +static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
>>
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +       if (!kms)
>> +               return;
>> +       DBG("dev=%p, crtc=%d", dev, crtc_id);
>> +       kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
>> +}
>> +
>> +#ifdef CONFIG_DEBUG_FS
>> +static int msm_gem_show(struct seq_file *m, void *arg)
>> +{
>> +       struct drm_info_node *node = (struct drm_info_node *) m->private;
>> +       struct drm_device *dev = node->minor->dev;
>>
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       int ret;
>> +
>> +       ret = mutex_lock_interruptible(&dev->struct_mutex);
>>
>> +       if (ret)
>> +               return ret;
>> +
>> +       seq_printf(m, "All Objects:\n");
>> +       msm_gem_describe_objects(&priv->obj_list, m);
>> +
>> +       mutex_unlock(&dev->struct_mutex);
>>
>> +
>> +       return 0;
>> +}
>> +
>> +static int msm_mm_show(struct seq_file *m, void *arg)
>> +{
>> +       struct drm_info_node *node = (struct drm_info_node *) m->private;
>> +       struct drm_device *dev = node->minor->dev;
>> +       return drm_mm_dump_table(m, dev->mm_private);
>> +}
>> +
>> +static int msm_fb_show(struct seq_file *m, void *arg)
>> +{
>> +       struct drm_info_node *node = (struct drm_info_node *) m->private;
>> +       struct drm_device *dev = node->minor->dev;
>>
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct drm_framebuffer *fb, *fbdev_fb = NULL;
>> +
>> +       if (priv->fbdev) {
>> +               seq_printf(m, "fbcon ");
>> +               fbdev_fb = priv->fbdev->fb;
>> +               msm_framebuffer_describe(fbdev_fb, m);
>> +       }
>> +
>> +       mutex_lock(&dev->mode_config.fb_lock);
>> +       list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
>> +               if (fb == fbdev_fb)
>> +                       continue;
>> +
>> +               seq_printf(m, "user ");
>> +               msm_framebuffer_describe(fb, m);
>> +       }
>> +       mutex_unlock(&dev->mode_config.fb_lock);
>>
>> +
>> +       return 0;
>> +}
>> +
>> +static struct drm_info_list msm_debugfs_list[] = {
>> +               {"gem", msm_gem_show, 0},
>> +               { "mm", msm_mm_show,   0 },
>> +               { "fb", msm_fb_show, 0 },
>> +};
>> +
>> +static int msm_debugfs_init(struct drm_minor *minor)
>> +{
>> +       struct drm_device *dev = minor->dev;
>> +       int ret;
>> +
>> +       ret = drm_debugfs_create_files(msm_debugfs_list,
>> +                       ARRAY_SIZE(msm_debugfs_list),
>> +                       minor->debugfs_root, minor);
>> +
>> +       if (ret) {
>> +               dev_err(dev->dev, "could not install msm_debugfs_list\n");
>>
>> +               return ret;
>> +       }
>> +
>> +       return ret;
>> +}
>> +
>> +static void msm_debugfs_cleanup(struct drm_minor *minor)
>> +{
>> +       drm_debugfs_remove_files(msm_debugfs_list,
>> +                       ARRAY_SIZE(msm_debugfs_list), minor);
>> +}
>> +#endif
>> +
>> +static const struct vm_operations_struct vm_ops = {
>> +       .fault = msm_gem_fault,
>> +       .open = drm_gem_vm_open,
>> +       .close = drm_gem_vm_close,
>> +};
>> +
>> +static const struct file_operations fops = {
>> +       .owner              = THIS_MODULE,
>> +       .open               = drm_open,
>> +       .release            = drm_release,
>> +       .unlocked_ioctl     = drm_ioctl,
>> +#ifdef CONFIG_COMPAT
>> +       .compat_ioctl       = drm_compat_ioctl,
>> +#endif
>> +       .poll               = drm_poll,
>> +       .read               = drm_read,
>> +       .fasync             = drm_fasync,
>> +       .llseek             = no_llseek,
>> +       .mmap               = msm_gem_mmap,
>> +};
>> +
>> +static struct drm_driver msm_driver = {
>> +       .driver_features    = DRIVER_HAVE_IRQ | DRIVER_GEM |
>> DRIVER_MODESET,
>> +       .load               = msm_load,
>> +       .unload             = msm_unload,
>> +       .preclose           = msm_preclose,
>> +       .lastclose          = msm_lastclose,
>> +       .irq_handler        = msm_irq,
>> +       .irq_preinstall     = msm_irq_preinstall,
>> +       .irq_postinstall    = msm_irq_postinstall,
>> +       .irq_uninstall      = msm_irq_uninstall,
>> +       .get_vblank_counter = drm_vblank_count,
>> +       .enable_vblank      = msm_enable_vblank,
>> +       .disable_vblank     = msm_disable_vblank,
>> +       .gem_free_object    = msm_gem_free_object,
>> +       .gem_vm_ops         = &vm_ops,
>> +       .dumb_create        = msm_gem_dumb_create,
>> +       .dumb_map_offset    = msm_gem_dumb_map_offset,
>> +       .dumb_destroy       = msm_gem_dumb_destroy,
>> +#ifdef CONFIG_DEBUG_FS
>> +       .debugfs_init       = msm_debugfs_init,
>> +       .debugfs_cleanup    = msm_debugfs_cleanup,
>> +#endif
>> +       .fops               = &fops,
>> +       .name               = "msm",
>> +       .desc               = "MSM Snapdragon DRM",
>> +       .date               = "20130625",
>> +       .major              = 1,
>> +       .minor              = 0,
>> +};
>> +
>> +#ifdef CONFIG_PM_SLEEP
>> +static int msm_pm_suspend(struct device *dev)
>> +{
>> +       struct drm_device *ddev = dev_get_drvdata(dev);
>> +       struct msm_drm_private *priv = ddev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>> +
>> +       drm_kms_helper_poll_disable(ddev);
>> +
>> +       return kms->funcs->pm_suspend(kms);
>> +}
>> +
>> +static int msm_pm_resume(struct device *dev)
>> +{
>> +       struct drm_device *ddev = dev_get_drvdata(dev);
>> +       struct msm_drm_private *priv = ddev->dev_private;
>> +       struct msm_kms *kms = priv->kms;
>>
>> +       int ret = 0;
>> +
>> +       ret = kms->funcs->pm_resume(kms);
>>
>> +       if (ret)
>> +               return ret;
>> +
>> +       drm_kms_helper_poll_enable(ddev);
>>
>> +
>> +       return 0;
>> +}
>> +#endif
>> +
>> +static const struct dev_pm_ops msm_pm_ops = {
>> +       SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
>> +};
>> +
>> +/*
>> + * Platform driver:
>> + */
>> +
>> +static int msm_pdev_probe(struct platform_device *pdev)
>> +{
>> +       return drm_platform_init(&msm_driver, pdev);
>> +}
>> +
>> +static int msm_pdev_remove(struct platform_device *pdev)
>> +{
>> +       drm_platform_exit(&msm_driver, pdev);
>>
>> +
>> +       return 0;
>> +}
>> +
>> +static const struct platform_device_id msm_id[] = {
>> +       { "mdp", 0 },
>> +       { }
>> +};
>> +
>> +static struct platform_driver msm_platform_driver = {
>> +       .probe      = msm_pdev_probe,
>> +       .remove     = msm_pdev_remove,
>> +       .driver     = {
>> +               .owner  = THIS_MODULE,
>> +               .name   = "msm",
>> +               .pm     = &msm_pm_ops,
>> +       },
>> +       .id_table   = msm_id,
>> +};
>> +
>> +static int __init msm_drm_init(void)
>> +{
>> +       DBG("init");
>> +       hdmi_init();
>> +       return platform_driver_register(&msm_platform_driver);
>> +}
>> +
>> +static void __exit msm_drm_fini(void)
>> +{
>> +       DBG("fini");
>> +       platform_driver_unregister(&msm_platform_driver);
>> +       hdmi_fini();
>> +}
>> +
>> +module_init(msm_drm_init);
>> +module_exit(msm_drm_fini);
>> +
>> +MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
>> +MODULE_DESCRIPTION("MSM DRM Driver");
>> +MODULE_LICENSE("GPL");
>
>
>> diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
>> new file mode 100644
>> index 0000000..a996490
>> --- /dev/null
>> +++ b/drivers/gpu/drm/msm/msm_gem.c
>> @@ -0,0 +1,441 @@
>>
>> +/*
>> + * Copyright (C) 2013 Red Hat
>> + * Author: Rob Clark <robdclark@gmail.com>
>> + *
>> + * This program is free software; you can redistribute it and/or modify
>> it
>> + * under the terms of the GNU General Public License version 2 as
>> published by
>> + * the Free Software Foundation.
>> + *
>> + * This program is distributed in the hope that it will be useful, but
>> WITHOUT
>> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
>> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
>> for
>> + * more details.
>> + *
>> + * You should have received a copy of the GNU General Public License
>> along with
>> + * this program.  If not, see <http://www.gnu.org/licenses/>.
>> + */
>> +
>> +#include <linux/spinlock.h>
>> +#include <linux/shmem_fs.h>
>> +
>> +#include "msm_drv.h"
>> +
>> +struct msm_gem_object {
>> +       struct drm_gem_object base;
>> +
>> +       struct list_head mm_list;
>> +
>> +       uint32_t flags;
>> +       struct page **pages;
>> +       struct sg_table *sgt;
>> +       void *vaddr;
>> +
>> +       struct {
>> +               // XXX
>> +               uint32_t iova;
>> +       } domain[NUM_DOMAINS];
>> +};
>> +#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
>> +
>> +/* called with dev->struct_mutex held */
>> +/* TODO move this into drm_gem.c */
>> +static struct page **attach_pages(struct drm_gem_object *obj)
>> +{
>> +       struct inode *inode;
>> +       struct address_space *mapping;
>> +       struct page *p, **pages;
>> +       int i, npages;
>> +
>> +       /* This is the shared memory object that backs the GEM resource */
>> +       inode = file_inode(obj->filp);
>> +       mapping = inode->i_mapping;
>> +
>> +       npages = obj->size >> PAGE_SHIFT;
>> +
>> +       pages = drm_malloc_ab(npages, sizeof(struct page *));
>> +       if (pages == NULL)
>> +               return ERR_PTR(-ENOMEM);
>> +
>> +       for (i = 0; i < npages; i++) {
>> +               p = shmem_read_mapping_page(mapping, i);
>> +               if (IS_ERR(p))
>> +                       goto fail;
>> +               pages[i] = p;
>> +       }
>> +
>> +       return pages;
>> +
>> +fail:
>> +       while (i--)
>> +               page_cache_release(pages[i]);
>> +
>> +       drm_free_large(pages);
>> +       return ERR_CAST(p);
>> +}
>> +
>> +static void detach_pages(struct drm_gem_object *obj, struct page **pages)
>> +{
>> +       int i, npages;
>> +
>> +       npages = obj->size >> PAGE_SHIFT;
>> +
>> +       for (i = 0; i < npages; i++) {
>> +               set_page_dirty(pages[i]);
>> +
>> +               /* Undo the reference we took when populating the table */
>> +               page_cache_release(pages[i]);
>> +       }
>> +
>> +       drm_free_large(pages);
>> +}
>> +
>> +
>> +/* called with dev->struct_mutex held */
>> +static struct page **get_pages(struct drm_gem_object *obj)
>> +{
>> +       struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +
>> +       if (!msm_obj->pages) {
>> +               struct page **p = attach_pages(obj);
>> +               int npages = obj->size >> PAGE_SHIFT;
>> +
>> +               if (IS_ERR(p)) {
>> +                       dev_err(obj->dev->dev, "could not get pages: %ld\n",
>> +                                       PTR_ERR(p));
>> +                       return p;
>> +               }
>> +               msm_obj->pages = p;
>> +               msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
>> +       }
>> +
>> +       return msm_obj->pages;
>> +}
>> +
>> +static void put_pages(struct drm_gem_object *obj)
>> +{
>> +       struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +
>> +       if (msm_obj->pages) {
>> +               if (msm_obj->sgt) {
>> +                       sg_free_table(msm_obj->sgt);
>> +                       kfree(msm_obj->sgt);
>> +               }
>> +               detach_pages(obj, msm_obj->pages);
>> +               msm_obj->pages = NULL;
>> +       }
>> +}
>> +
>> +int msm_gem_mmap_obj(struct drm_gem_object *obj,
>> +               struct vm_area_struct *vma)
>> +{
>> +       struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +
>> +       vma->vm_flags &= ~VM_PFNMAP;
>> +       vma->vm_flags |= VM_MIXEDMAP;
>> +
>> +       if (msm_obj->flags & MSM_BO_WC) {
>> +               vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
>> +       } else if (msm_obj->flags & MSM_BO_UNCACHED) {
>> +               vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
>> +       } else {
>> +               /*
>> +                * Shunt off cached objs to shmem file so they have their own
>> +                * address_space (so unmap_mapping_range does what we want,
>> +                * in particular in the case of mmap'd dmabufs)
>> +                */
>> +               fput(vma->vm_file);
>> +               get_file(obj->filp);
>> +               vma->vm_pgoff = 0;
>> +               vma->vm_file  = obj->filp;
>> +
>> +               vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
>> +       }
>> +
>> +       return 0;
>> +}
>> +
>> +int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
>> +{
>> +       int ret;
>> +
>> +       ret = drm_gem_mmap(filp, vma);
>> +       if (ret) {
>> +               DBG("mmap failed: %d", ret);
>> +               return ret;
>> +       }
>> +
>> +       return msm_gem_mmap_obj(vma->vm_private_data, vma);
>> +}
>> +
>> +int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
>> +{
>> +       struct drm_gem_object *obj = vma->vm_private_data;
>> +       struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +       struct drm_device *dev = obj->dev;
>> +       struct page **pages;
>> +       unsigned long pfn;
>> +       pgoff_t pgoff;
>> +       int ret;
>> +
>> +       /* Make sure we don't parallel update on a fault, nor move or remove
>> +        * something from beneath our feet
>> +        */
>> +       mutex_lock(&dev->struct_mutex);
>> +
>> +       /* make sure we have pages attached now */
>> +       pages = get_pages(obj);
>> +       if (IS_ERR(pages)) {
>> +               ret = PTR_ERR(pages);
>> +               goto out;
>> +       }
>> +
>> +       /* We don't use vmf->pgoff since that has the fake offset: */
>> +       pgoff = ((unsigned long)vmf->virtual_address -
>> +                       vma->vm_start) >> PAGE_SHIFT;
>> +
>> +       pfn = page_to_pfn(msm_obj->pages[pgoff]);
>> +
>> +       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
>> +                       pfn, pfn << PAGE_SHIFT);
>> +
>> +       ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
>> +
>> +out:
>> +       mutex_unlock(&dev->struct_mutex);
>> +       switch (ret) {
>> +       case 0:
>> +       case -ERESTARTSYS:
>> +       case -EINTR:
>> +               return VM_FAULT_NOPAGE;
>> +       case -ENOMEM:
>> +               return VM_FAULT_OOM;
>> +       default:
>> +               return VM_FAULT_SIGBUS;
>> +       }
>> +}
>> +
>> +/** get mmap offset */
>> +static uint64_t mmap_offset(struct drm_gem_object *obj)
>> +{
>> +       struct drm_device *dev = obj->dev;
>> +
>> +       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
>> +
>> +       if (!obj->map_list.map) {
>> +               /* Make it mmapable */
>> +               int ret = drm_gem_create_mmap_offset(obj);
>> +
>> +               if (ret) {
>> +                       dev_err(dev->dev, "could not allocate mmap offset\n");
>> +                       return 0;
>> +               }
>> +       }
>> +
>> +       return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
>> +}
>> +
>> +uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
>> +{
>> +       uint64_t offset;
>> +       mutex_lock(&obj->dev->struct_mutex);
>> +       offset = mmap_offset(obj);
>> +       mutex_unlock(&obj->dev->struct_mutex);
>> +       return offset;
>> +}
>> +
>> +int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
>> +{
>> +       struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +       int ret = 0;
>> +
>> +       mutex_lock(&obj->dev->struct_mutex);
>> +       if (!msm_obj->domain[id].iova) {
>> +               struct msm_drm_private *priv = obj->dev->dev_private;
>> +               uint32_t offset = (uint32_t)mmap_offset(obj);
>> +               get_pages(obj);
>> +               ret = iommu_map_range(priv->iommus[id], offset,
>> +                               msm_obj->sgt->sgl, obj->size, IOMMU_READ);
>> +               msm_obj->domain[id].iova = offset;
>> +       }
>> +       mutex_unlock(&obj->dev->struct_mutex);
>> +
>> +       if (!ret)
>> +               *iova = msm_obj->domain[id].iova;
>> +
>> +       return ret;
>> +}
>> +
>> +void msm_gem_put_iova(struct drm_gem_object *obj, int id)
>> +{
>> +}
>> +
>> +int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
>> +               struct drm_mode_create_dumb *args)
>> +{
>> +       args->pitch = align_pitch(args->width, args->bpp);
>> +       args->size  = PAGE_ALIGN(args->pitch * args->height);
>> +       return msm_gem_new_handle(dev, file, args->size,
>> +                       MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
>> +}
>> +
>> +int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
>> +               uint32_t handle)
>> +{
>> +       /* No special work needed, drop the reference and see what falls out */
>> +       return drm_gem_handle_delete(file, handle);
>> +}
>> +
>> +int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
>> +               uint32_t handle, uint64_t *offset)
>> +{
>> +       struct drm_gem_object *obj;
>> +       int ret = 0;
>> +
>> +       /* GEM does all our handle to object mapping */
>> +       obj = drm_gem_object_lookup(dev, file, handle);
>> +       if (obj == NULL) {
>> +               ret = -ENOENT;
>> +               goto fail;
>> +       }
>> +
>> +       *offset = msm_gem_mmap_offset(obj);
>> +
>> +       drm_gem_object_unreference_unlocked(obj);
>> +
>> +fail:
>> +       return ret;
>> +}
>> +
>> +void *msm_gem_vaddr(struct drm_gem_object *obj)
>> +{
>> +       struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
>> +       if (!msm_obj->vaddr) {
>> +               struct page **pages = get_pages(obj);
>> +               if (IS_ERR(pages))
>> +                       return ERR_CAST(pages);
>> +               msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
>> +                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
>> +       }
>> +       return msm_obj->vaddr;
>> +}
>> +
>> +#ifdef CONFIG_DEBUG_FS
>> +void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
>> +{
>> +       struct drm_device *dev = obj->dev;
>> +       struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +       uint64_t off = 0;
>> +
>> +       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
>> +
>> +       if (obj->map_list.map)
>> +               off = (uint64_t)obj->map_list.hash.key;
>> +
>> +       seq_printf(m, "%08x: %2d (%2d) %08llx %p %d\n",
>> +                       msm_obj->flags, obj->name, obj->refcount.refcount.counter,
>> +                       off, msm_obj->vaddr, obj->size);
>> +}
>> +
>> +void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
>> +{
>> +       struct msm_gem_object *msm_obj;
>> +       int count = 0;
>> +       size_t size = 0;
>> +
>> +       list_for_each_entry(msm_obj, list, mm_list) {
>> +               struct drm_gem_object *obj = &msm_obj->base;
>> +               seq_printf(m, "   ");
>> +               msm_gem_describe(obj, m);
>> +               count++;
>> +               size += obj->size;
>> +       }
>> +
>> +       seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
>> +}
>> +#endif
>> +
>> +void msm_gem_free_object(struct drm_gem_object *obj)
>> +{
>> +       struct drm_device *dev = obj->dev;
>> +       struct msm_gem_object *msm_obj = to_msm_bo(obj);
>> +       int id;
>> +
>> +       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
>> +
>> +       list_del(&msm_obj->mm_list);
>> +
>> +       if (obj->map_list.map)
>> +               drm_gem_free_mmap_offset(obj);
>> +
>> +       if (msm_obj->vaddr)
>> +               vunmap(msm_obj->vaddr);
>> +
>> +       for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
>> +               if (msm_obj->domain[id].iova) {
>> +                       struct msm_drm_private *priv = obj->dev->dev_private;
>> +                       uint32_t offset = (uint32_t)mmap_offset(obj);
>> +                       iommu_unmap_range(priv->iommus[id], offset, obj->size);
>> +               }
>> +       }
>> +
>> +       put_pages(obj);
>> +
>> +       drm_gem_object_release(obj);
>> +
>> +       kfree(obj);
>> +}
>> +
>> +/* convenience method to construct a GEM buffer object, and userspace handle */
>> +int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
>> +               uint32_t size, uint32_t flags, uint32_t *handle)
>> +{
>> +       struct drm_gem_object *obj;
>> +       int ret;
>> +
>> +       obj = msm_gem_new(dev, size, flags);
>> +       if (!obj)
>> +               return -ENOMEM;
>> +
>> +       ret = drm_gem_handle_create(file, obj, handle);
>> +
>> +       /* drop reference from allocate - handle holds it now */
>> +       drm_gem_object_unreference_unlocked(obj);
>> +
>> +       return ret;
>> +}
>> +
>> +struct drm_gem_object *msm_gem_new(struct drm_device *dev,
>> +               uint32_t size, uint32_t flags)
>> +{
>> +       struct msm_drm_private *priv = dev->dev_private;
>> +       struct msm_gem_object *msm_obj;
>> +       struct drm_gem_object *obj = NULL;
>> +       int ret;
>> +
>> +       size = PAGE_ALIGN(size);
>> +
>> +       msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
>> +       if (!msm_obj)
>> +               goto fail;
>> +
>> +       obj = &msm_obj->base;
>> +
>> +       ret = drm_gem_object_init(dev, obj, size);
>> +       if (ret)
>> +               goto fail;
>> +
>> +       msm_obj->flags = flags;
>> +
>> +       mutex_lock(&obj->dev->struct_mutex);
>> +       list_add(&msm_obj->mm_list, &priv->obj_list);
>> +       mutex_unlock(&obj->dev->struct_mutex);
>> +
>> +       return obj;
>> +
>> +fail:
>> +       if (obj)
>> +               drm_gem_object_unreference_unlocked(obj);
>> +
>> +       return NULL;
>> +}
>>
>
> Yay GEM.  No complaints here.
>
> Jordan
>
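
For context, a minimal sketch of how the GEM helpers quoted above fit
together from a caller's point of view.  The function below is purely
illustrative and not part of the patch; it only relies on msm_gem_new(),
msm_gem_get_iova() and msm_gem_vaddr() as shown in msm_gem.c, and takes
dev->struct_mutex around the vmap since msm_gem_vaddr() expects it held:

	/* hypothetical helper, for illustration only: allocate a scanout
	 * buffer, pin it into iommu domain 0 and get a kernel mapping:
	 */
	static int example_alloc_scanout(struct drm_device *dev, uint32_t size,
			uint32_t *iova, void **vaddr)
	{
		struct drm_gem_object *bo;
		int ret;

		/* scanout buffers want write-combined, not cached, CPU access: */
		bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
		if (!bo)
			return -ENOMEM;

		/* map the backing pages into the display iommu domain: */
		ret = msm_gem_get_iova(bo, 0, iova);
		if (ret)
			goto fail;

		/* msm_gem_vaddr() requires struct_mutex to be held: */
		mutex_lock(&dev->struct_mutex);
		*vaddr = msm_gem_vaddr(bo);
		mutex_unlock(&dev->struct_mutex);

		if (IS_ERR(*vaddr)) {
			ret = PTR_ERR(*vaddr);
			goto fail;
		}

		return 0;

	fail:
		/* drop the allocation reference, as msm_gem_new_handle() does: */
		drm_gem_object_unreference_unlocked(bo);
		return ret;
	}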
diff mbox

Patch

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index b16c50e..3327d78 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -220,3 +220,5 @@  source "drivers/gpu/drm/omapdrm/Kconfig"
 source "drivers/gpu/drm/tilcdc/Kconfig"
 
 source "drivers/gpu/drm/qxl/Kconfig"
+
+source "drivers/gpu/drm/msm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1c9f2439..4aa77c3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -52,4 +52,5 @@  obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
 obj-$(CONFIG_DRM_OMAP)	+= omapdrm/
 obj-$(CONFIG_DRM_TILCDC)	+= tilcdc/
 obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_MSM) += msm/
 obj-y			+= i2c/
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 0000000..a06c19c
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,34 @@ 
+
+config DRM_MSM
+	tristate "MSM DRM"
+	depends on DRM
+	depends on ARCH_MSM
+	depends on ARCH_MSM8960
+	select DRM_KMS_HELPER
+	select SHMEM
+	select TMPFS
+	default y
+	help
+	  DRM/KMS driver for MSM/snapdragon.
+
+config DRM_MSM_FBDEV
+	bool "Enable legacy fbdev support for MSM modesetting driver"
+	depends on DRM_MSM
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	default y
+	help
+	  Choose this option if you have a need for the legacy fbdev
+	  support. Note that this support also provides the Linux console
+	  support on top of the MSM modesetting driver.
+
+config DRM_MSM_REGISTER_LOGGING
+	bool "MSM DRM register logging"
+	depends on DRM_MSM
+	default n
+	help
+	  Compile in support for logging register reads/writes in a format
+	  that can be parsed by the envytools demsm tool.  If enabled, register
+	  logging can be switched on via msm.reglog=y module param.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 0000000..29cf078
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,23 @@ 
+ccflags-y := -Iinclude/drm
+ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+	ccflags-y += -Werror
+endif
+
+msm-y := \
+	hdmi/hdmi_connector.o \
+	hdmi/hdmi_i2c.o \
+	hdmi/hdmi_phy_8960.o \
+	hdmi/hdmi_phy_8x60.o \
+	mdp4/mdp4_crtc.o \
+	mdp4/mdp4_dtv_encoder.o \
+	mdp4/mdp4_irq.o \
+	mdp4/mdp4_kms.o \
+	mdp4/mdp4_plane.o \
+	msm_connector.o \
+	msm_drv.o \
+	msm_fb.o \
+	msm_gem.o
+
+msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
+
+obj-$(CONFIG_DRM_MSM)	+= msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 0000000..b9e9d03
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,43 @@ 
+Rough thoughts/notes..
+
+We have (at least) 3 different display controller blocks at play:
+ + MDP3 - ?? seems to be what is on geeksphone peak device
+ + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
+ + MDSS - snapdragon 800
+
+(I don't have a completely clear picture on which display controller
+is in which devices)
+
+But, HDMI/DSI/etc blocks seem like they can be shared.  And I for sure
+don't want to have to deal with N different kms devices from
+xf86-video-freedreno.  Plus, it seems like we can do some clever tricks
+like have kms/crtc code build up gpu cmdstream to update scanout after
+rendering without involving the cpu.
+
+And on gpu side of things:
+ + zero, one, or two 2d cores (z180)
+ + and either a2xx or a3xx 3d core.
+
+So, one drm driver, with some modularity.  Different 'struct msm_kms'
+implementations, depending on display controller.  And one or more
+'struct msm_gpu' for the various different gpu sub-modules.
+
+The kms module provides the plane, crtc, and encoder objects, and
+loads whatever connectors are appropriate.
+
+For MDP4, the mapping is (I think):
+
+  plane   -> PIPE{RGBn,VGn}              \
+  crtc    -> OVLP{n} + DMA{P,S,E} (??)   |-> MDP "device"
+  encoder -> DTV/LCDC/DSI (within MDP4)  /
+  connector -> HDMI/DSI/etc              --> other device(s)
+
+Since the irqs that drm core mostly cares about are vblank/framedone,
+we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
+and treat the MDP4 block's irq as "the" irq, even though the connectors
+may have their own irqs which they install themselves.  For this reason
+the display controller is the "master" device.
+
+Each connector probably ends up being a separate device, just for the
+logistics of finding/mapping io region, irq, etc.
+
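As a purely illustrative sketch (not the actual interface from msm_drv.h),
the per-display-controller abstraction described in the notes above could
be shaped roughly like a function table that the core driver calls into,
similar to how the hdmi_phy_funcs vtable in this patch abstracts the
different PHYs:

	/* hypothetical, for illustration only: */
	struct msm_kms;

	struct msm_kms_funcs {
		/* the display controller owns "the" drm irq: */
		void (*irq_preinstall)(struct msm_kms *kms);
		int (*irq_postinstall)(struct msm_kms *kms);
		void (*irq_uninstall)(struct msm_kms *kms);
		irqreturn_t (*irq)(struct msm_kms *kms);
		int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
		void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
		/* set up hw and construct the planes/crtcs/encoders/connectors: */
		int (*hw_init)(struct msm_kms *kms);
		void (*destroy)(struct msm_kms *kms);
	};

	struct msm_kms {
		const struct msm_kms_funcs *funcs;
	};

An MDP4-specific implementation would then embed struct msm_kms as its
base and be recovered with the same container_of() pattern used elsewhere
in the patch (see to_mdp4_kms() in mdp4_crtc.c below).
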
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
new file mode 100644
index 0000000..b42fe81
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -0,0 +1,528 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <mach/board.h>
+#include <mach/socinfo.h>
+
+#include "hdmi_connector.h"
+
+static struct platform_device *hdmi_pdev;
+
+
+static void set_mode(struct hdmi_connector *hdmi_connector, bool power_on)
+{
+	uint32_t ctrl = 0;
+
+	if (power_on) {
+		ctrl |= HDMI_CTRL_ENABLE;
+		if (!hdmi_connector->hdmi) {
+			ctrl |= HDMI_CTRL_HDMI;
+			hdmi_write(hdmi_connector, REG_HDMI_CTRL, ctrl);
+			ctrl &= ~HDMI_CTRL_HDMI;
+		} else {
+			ctrl |= HDMI_CTRL_HDMI;
+		}
+	} else {
+		ctrl = HDMI_CTRL_HDMI;
+	}
+
+	hdmi_write(hdmi_connector, REG_HDMI_CTRL, ctrl);
+	DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
+			power_on ? "Enable" : "Disable", ctrl);
+}
+
+static int hpd_enable(struct hdmi_connector *hdmi_connector)
+{
+	struct drm_device *dev = hdmi_connector->base.base.dev;
+	struct msm_hdmi_platform_data *pd =
+			hdmi_connector->pdev->dev.platform_data;
+	struct hdmi_phy *phy = hdmi_connector->phy;
+	uint32_t hpd_ctrl;
+	int ret;
+
+	ret = pd->gpio_config(1);
+	if (ret) {
+		dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
+		goto fail;
+	}
+
+	ret = clk_prepare_enable(hdmi_connector->clk);
+	if (ret) {
+		dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
+		goto fail;
+	}
+
+	ret = clk_prepare_enable(hdmi_connector->m_pclk);
+	if (ret) {
+		dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
+		goto fail;
+	}
+
+	ret = clk_prepare_enable(hdmi_connector->s_pclk);
+	if (ret) {
+		dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
+		goto fail;
+	}
+
+	if (hdmi_connector->mpp0)
+		ret = regulator_enable(hdmi_connector->mpp0);
+	if (!ret)
+		ret = regulator_enable(hdmi_connector->mvs);
+	if (ret) {
+		dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
+		goto fail;
+	}
+
+	set_mode(hdmi_connector, false);
+	phy->funcs->reset(phy);
+	set_mode(hdmi_connector, true);
+
+	hdmi_write(hdmi_connector, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+	/* enable HPD events: */
+	hdmi_write(hdmi_connector, REG_HDMI_HPD_INT_CTRL,
+			HDMI_HPD_INT_CTRL_INT_CONNECT |
+			HDMI_HPD_INT_CTRL_INT_EN);
+
+	/* set timeout to 4.1ms (max) for hardware debounce */
+	hpd_ctrl = hdmi_read(hdmi_connector, REG_HDMI_HPD_CTRL);
+	hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+	/* Toggle HPD circuit to trigger HPD sense */
+	hdmi_write(hdmi_connector, REG_HDMI_HPD_CTRL,
+			~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+	hdmi_write(hdmi_connector, REG_HDMI_HPD_CTRL,
+			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static int hpd_disable(struct hdmi_connector *hdmi_connector)
+{
+	struct drm_device *dev = hdmi_connector->base.base.dev;
+	struct msm_hdmi_platform_data *pd =
+			hdmi_connector->pdev->dev.platform_data;
+	int ret = 0;
+
+	/* Disable HPD interrupt */
+	hdmi_write(hdmi_connector, REG_HDMI_HPD_INT_CTRL, 0);
+
+	set_mode(hdmi_connector, false);
+
+	if (hdmi_connector->mpp0)
+		ret = regulator_disable(hdmi_connector->mpp0);
+	if (!ret)
+		ret = regulator_disable(hdmi_connector->mvs);
+	if (ret) {
+		dev_err(dev->dev, "failed to disable regulators: %d\n", ret);
+		goto fail;
+	}
+
+	clk_disable_unprepare(hdmi_connector->clk);
+	clk_disable_unprepare(hdmi_connector->m_pclk);
+	clk_disable_unprepare(hdmi_connector->s_pclk);
+
+	ret = pd->gpio_config(0);
+	if (ret) {
+		dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static irqreturn_t hdmi_connector_irq(int irq, void *dev_id)
+{
+	struct hdmi_connector *hdmi_connector = dev_id;
+	struct drm_connector *connector = &hdmi_connector->base.base;
+	uint32_t hpd_int_status, hpd_int_ctrl;
+
+	/* Process HPD: */
+	hpd_int_status = hdmi_read(hdmi_connector, REG_HDMI_HPD_INT_STATUS);
+	hpd_int_ctrl   = hdmi_read(hdmi_connector, REG_HDMI_HPD_INT_CTRL);
+
+	if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+			(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+		bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+
+		DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
+
+		/* ack the irq: */
+		hdmi_write(hdmi_connector, REG_HDMI_HPD_INT_CTRL,
+				hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+
+		drm_helper_hpd_irq_event(connector->dev);
+
+		/* detect disconnect if we are connected or vice versa: */
+		hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+		if (!detected)
+			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+		hdmi_write(hdmi_connector, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+		return IRQ_HANDLED;
+	}
+
+	/* Process DDC: */
+	hdmi_i2c_irq(hdmi_connector->i2c);
+
+	/* TODO audio.. */
+
+	return IRQ_HANDLED;
+}
+
+static enum drm_connector_status hdmi_connector_detect(
+		struct drm_connector *connector, bool force)
+{
+	struct msm_connector *msm_connector = to_msm_connector(connector);
+	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
+	uint32_t hpd_int_status;
+
+	hpd_int_status = hdmi_read(hdmi_connector, REG_HDMI_HPD_INT_STATUS);
+
+	return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
+			connector_status_connected : connector_status_disconnected;
+}
+
+static void hdmi_connector_destroy(struct drm_connector *connector)
+{
+	struct msm_connector *msm_connector = to_msm_connector(connector);
+	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
+	struct hdmi_phy *phy = hdmi_connector->phy;
+
+	hpd_disable(hdmi_connector);
+
+	if (phy)
+		phy->funcs->destroy(phy);
+
+	if (hdmi_connector->i2c)
+		hdmi_i2c_destroy(hdmi_connector->i2c);
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+
+	put_device(&hdmi_connector->pdev->dev);
+
+	kfree(hdmi_connector);
+}
+
+static int hdmi_connector_get_modes(struct drm_connector *connector)
+{
+	struct msm_connector *msm_connector = to_msm_connector(connector);
+	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
+	struct edid *edid;
+	int ret = 0;
+
+	edid = drm_get_edid(connector, hdmi_connector->i2c);
+
+	drm_mode_connector_update_edid_property(connector, edid);
+
+	if (edid) {
+		ret = drm_add_edid_modes(connector, edid);
+		kfree(edid);
+	}
+
+	return ret;
+}
+
+static int hdmi_connector_mode_valid(struct drm_connector *connector,
+				 struct drm_display_mode *mode)
+{
+	return 0;
+}
+
+static const struct drm_connector_funcs hdmi_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = hdmi_connector_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = hdmi_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
+	.get_modes = hdmi_connector_get_modes,
+	.mode_valid = hdmi_connector_mode_valid,
+	.best_encoder = msm_connector_attached_encoder,
+};
+
+static void hdmi_connector_dpms(struct msm_connector *msm_connector, int mode)
+{
+	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
+	struct hdmi_phy *phy = hdmi_connector->phy;
+	bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+	DBG("mode=%d", mode);
+
+	if (enabled == hdmi_connector->enabled)
+		return;
+
+	if (enabled) {
+		phy->funcs->powerup(phy);
+		set_mode(hdmi_connector, true);
+	} else {
+		set_mode(hdmi_connector, false);
+		phy->funcs->powerdown(phy);
+	}
+
+	hdmi_connector->enabled = enabled;
+}
+
+static void hdmi_connector_mode_set(struct msm_connector *msm_connector,
+		struct drm_display_mode *mode)
+{
+	struct hdmi_connector *hdmi_connector = to_hdmi_connector(msm_connector);
+	int hstart, hend, vstart, vend;
+	uint32_t frame_ctrl;
+
+	hdmi_connector->hdmi = drm_match_cea_mode(mode) > 1;
+
+	hstart = mode->htotal - mode->hsync_start;
+	hend   = mode->htotal - mode->hsync_start + mode->hdisplay;
+
+	vstart = mode->vtotal - mode->vsync_start - 1;
+	vend   = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+	DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+			mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+	hdmi_write(hdmi_connector, REG_HDMI_TOTAL,
+			HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+			HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+	hdmi_write(hdmi_connector, REG_HDMI_ACTIVE_HSYNC,
+			HDMI_ACTIVE_HSYNC_START(hstart) |
+			HDMI_ACTIVE_HSYNC_END(hend));
+	hdmi_write(hdmi_connector, REG_HDMI_ACTIVE_VSYNC,
+			HDMI_ACTIVE_VSYNC_START(vstart) |
+			HDMI_ACTIVE_VSYNC_END(vend));
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		hdmi_write(hdmi_connector, REG_HDMI_VSYNC_TOTAL_F2,
+				HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+		hdmi_write(hdmi_connector, REG_HDMI_VSYNC_ACTIVE_F2,
+				HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+				HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+	} else {
+		hdmi_write(hdmi_connector, REG_HDMI_VSYNC_TOTAL_F2,
+				HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+		hdmi_write(hdmi_connector, REG_HDMI_VSYNC_ACTIVE_F2,
+				HDMI_VSYNC_ACTIVE_F2_START(0) |
+				HDMI_VSYNC_ACTIVE_F2_END(0));
+	}
+
+	frame_ctrl = 0;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+	DBG("frame_ctrl=%08x", frame_ctrl);
+	hdmi_write(hdmi_connector, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+	// TODO until we have audio, this might be safest:
+	if (hdmi_connector->hdmi)
+		hdmi_write(hdmi_connector, REG_HDMI_GC, HDMI_GC_MUTE);
+}
+
+static const struct msm_connector_funcs msm_connector_funcs = {
+		.dpms = hdmi_connector_dpms,
+		.mode_set = hdmi_connector_mode_set,
+};
+
+
+/* initialize connector */
+struct drm_connector *hdmi_connector_init(struct drm_device *dev,
+		struct drm_encoder *encoder)
+{
+	struct drm_connector *connector = NULL;
+	struct hdmi_connector *hdmi_connector;
+	struct platform_device *pdev = hdmi_pdev;
+	struct resource *res;
+	int ret;
+
+	if (!pdev) {
+		dev_err(dev->dev, "no hdmi device\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	get_device(&pdev->dev);
+
+	hdmi_connector = kzalloc(sizeof(struct hdmi_connector), GFP_KERNEL);
+	if (!hdmi_connector) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	connector = &hdmi_connector->base.base;
+
+	hdmi_connector->pdev = pdev;
+
+	msm_connector_init(&hdmi_connector->base,
+			&msm_connector_funcs, encoder);
+
+	drm_connector_init(dev, connector, &hdmi_connector_funcs,
+			DRM_MODE_CONNECTOR_HDMIA);
+	drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
+
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	connector->interlace_allowed = 1;
+	connector->doublescan_allowed = 0;
+
+	drm_sysfs_connector_add(connector);
+
+	/* not sure which phy maps to which msm.. probably I'm missing some */
+	if (cpu_is_msm8960() || cpu_is_apq8064())
+		hdmi_connector->phy = hdmi_phy_8960_init(hdmi_connector);
+	else if (cpu_is_msm8x60())
+		hdmi_connector->phy = hdmi_phy_8x60_init(hdmi_connector);
+	else
+		hdmi_connector->phy = ERR_PTR(-ENXIO);
+
+	if (IS_ERR(hdmi_connector->phy)) {
+		ret = PTR_ERR(hdmi_connector->phy);
+		dev_err(dev->dev, "failed to load phy: %d\n", ret);
+		hdmi_connector->phy = NULL;
+		goto fail;
+	}
+
+	res = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, "hdmi_msm_hdmi_addr");
+	if (!res) {
+		dev_err(dev->dev, "failed to get memory resource\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	hdmi_connector->mmio = msm_ioremap(&pdev->dev,
+			res->start, resource_size(res), "HDMI");
+	if (!hdmi_connector->mmio) {
+		dev_err(dev->dev, "failed to ioremap\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	hdmi_connector->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
+	if (IS_ERR(hdmi_connector->mvs))
+		hdmi_connector->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
+	if (IS_ERR(hdmi_connector->mvs)) {
+		ret = PTR_ERR(hdmi_connector->mvs);
+		dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
+		goto fail;
+	}
+
+	hdmi_connector->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
+	if (IS_ERR(hdmi_connector->mpp0))
+		hdmi_connector->mpp0 = NULL;
+
+	hdmi_connector->clk = devm_clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(hdmi_connector->clk)) {
+		ret = PTR_ERR(hdmi_connector->clk);
+		dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
+		goto fail;
+	}
+
+	hdmi_connector->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
+	if (IS_ERR(hdmi_connector->m_pclk)) {
+		ret = PTR_ERR(hdmi_connector->m_pclk);
+		dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
+		goto fail;
+	}
+
+	hdmi_connector->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
+	if (IS_ERR(hdmi_connector->s_pclk)) {
+		ret = PTR_ERR(hdmi_connector->s_pclk);
+		dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
+		goto fail;
+	}
+
+	hdmi_connector->irq = platform_get_irq(pdev, 0);
+	if (hdmi_connector->irq < 0) {
+		ret = hdmi_connector->irq;
+		dev_err(dev->dev, "failed to get irq: %d\n", ret);
+		goto fail;
+	}
+
+	ret = devm_request_threaded_irq(&pdev->dev, hdmi_connector->irq,
+			NULL, hdmi_connector_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+			"hdmi_connector_isr", hdmi_connector);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+				hdmi_connector->irq, ret);
+		goto fail;
+	}
+
+	hdmi_connector->i2c = hdmi_i2c_init(hdmi_connector);
+	if (IS_ERR(hdmi_connector->i2c)) {
+		ret = PTR_ERR(hdmi_connector->i2c);
+		dev_err(dev->dev, "failed to get i2c: %d\n", ret);
+		hdmi_connector->i2c = NULL;
+		goto fail;
+	}
+
+	ret = hpd_enable(hdmi_connector);
+	if (ret) {
+		dev_err(dev->dev, "failed to enable HPD: %d\n", ret);
+		goto fail;
+	}
+
+	return connector;
+
+fail:
+	if (connector)
+		hdmi_connector_destroy(connector);
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * The hdmi device:
+ */
+
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+	hdmi_pdev = pdev;
+	return 0;
+}
+
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+	hdmi_pdev = NULL;
+	return 0;
+}
+
+static struct platform_driver hdmi_driver = {
+	.probe = hdmi_probe,
+	.remove = hdmi_remove,
+	.driver.name = "hdmi_msm",
+};
+
+void __init hdmi_init(void)
+{
+	platform_driver_register(&hdmi_driver);
+}
+
+void __exit hdmi_fini(void)
+{
+	platform_driver_unregister(&hdmi_driver);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.h b/drivers/gpu/drm/msm/hdmi/hdmi_connector.h
new file mode 100644
index 0000000..c315fda
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.h
@@ -0,0 +1,95 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HDMI_CONNECTOR_H__
+#define __HDMI_CONNECTOR_H__
+
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_connector.h"
+
+#include "hdmi.xml.h"
+
+
+struct hdmi_phy;
+
+struct hdmi_connector {
+	struct msm_connector base;
+
+	struct platform_device *pdev;
+
+	void __iomem *mmio;
+
+	struct regulator *mvs;        /* HDMI_5V */
+	struct regulator *mpp0;       /* External 5V */
+
+	struct clk *clk;
+	struct clk *m_pclk;
+	struct clk *s_pclk;
+
+	struct hdmi_phy *phy;
+	struct i2c_adapter *i2c;
+
+	int irq;
+
+	bool enabled;                 /* DPMS state */
+	bool hdmi;                    /* are we in hdmi mode? */
+};
+#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
+
+static inline void hdmi_write(struct hdmi_connector *c, u32 reg, u32 data)
+{
+	msm_writel(data, c->mmio + reg);
+}
+
+static inline u32 hdmi_read(struct hdmi_connector *c, u32 reg)
+{
+	return msm_readl(c->mmio + reg);
+}
+
+/*
+ * The phy appears to be different, for example between 8960 and 8x60,
+ * so split the phy related functions out and load the correct one at
+ * runtime:
+ */
+
+struct hdmi_phy_funcs {
+	void (*destroy)(struct hdmi_phy *phy);
+	void (*reset)(struct hdmi_phy *phy);
+	void (*powerup)(struct hdmi_phy *phy);
+	void (*powerdown)(struct hdmi_phy *phy);
+};
+
+struct hdmi_phy {
+	const struct hdmi_phy_funcs *funcs;
+};
+
+struct hdmi_phy *hdmi_phy_8960_init(struct hdmi_connector *hdmi_connector);
+struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi_connector *hdmi_connector);
+
+/*
+ * i2c adapter for ddc:
+ */
+
+void hdmi_i2c_irq(struct i2c_adapter *i2c);
+void hdmi_i2c_destroy(struct i2c_adapter *i2c);
+struct i2c_adapter *hdmi_i2c_init(struct hdmi_connector *hdmi_connector);
+
+#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 0000000..def1d0c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,264 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi_connector.h"
+
+struct hdmi_i2c_adapter {
+	struct i2c_adapter base;
+	struct hdmi_connector *hdmi_connector;
+	struct completion ddc_xfer;
+};
+#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
+
+static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+	struct hdmi_connector *hdmi_connector = hdmi_i2c->hdmi_connector;
+
+	hdmi_write(hdmi_connector, REG_HDMI_DDC_SPEED,
+			HDMI_DDC_SPEED_THRESHOLD(2) |
+			HDMI_DDC_SPEED_PRESCALE(10));
+
+	hdmi_write(hdmi_connector, REG_HDMI_DDC_SETUP,
+			HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+	/* enable reference timer for 27us */
+	hdmi_write(hdmi_connector, REG_HDMI_DDC_REF,
+			HDMI_DDC_REF_REFTIMER_ENABLE |
+			HDMI_DDC_REF_REFTIMER(27));
+}
+
+static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+	struct hdmi_connector *hdmi_connector = hdmi_i2c->hdmi_connector;
+	struct drm_device *dev = hdmi_connector->base.base.dev;
+	uint32_t retry = 0xffff;
+	uint32_t ddc_int_ctrl;
+
+	do {
+		--retry;
+
+		hdmi_write(hdmi_connector, REG_HDMI_DDC_INT_CTRL,
+				HDMI_DDC_INT_CTRL_SW_DONE_ACK |
+				HDMI_DDC_INT_CTRL_SW_DONE_MASK);
+
+		ddc_int_ctrl = hdmi_read(hdmi_connector, REG_HDMI_DDC_INT_CTRL);
+
+	} while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
+
+	if (!retry) {
+		dev_err(dev->dev, "timeout waiting for DDC\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+#define MAX_TRANSACTIONS 4
+
+static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
+		struct i2c_msg *msgs, int num)
+{
+	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+	struct hdmi_connector *hdmi_connector = hdmi_i2c->hdmi_connector;
+	struct drm_device *dev = hdmi_connector->base.base.dev;
+	int indices[MAX_TRANSACTIONS];
+	int ret, i, j, index = 0;
+	uint32_t ddc_status, ddc_data, i2c_trans;
+
+	num = min(num, MAX_TRANSACTIONS);
+
+	WARN_ON(!(hdmi_read(hdmi_connector, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
+
+	if (num == 0)
+		return num;
+
+	init_ddc(hdmi_i2c);
+
+	ret = ddc_clear_irq(hdmi_i2c);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < num; i++) {
+		struct i2c_msg *p = &msgs[i];
+		uint32_t raw_addr = p->addr << 1;
+
+		if (p->flags & I2C_M_RD)
+			raw_addr |= 1;
+
+		ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
+				HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+
+		if (i == 0) {
+			ddc_data |= HDMI_DDC_DATA_INDEX(0) |
+					HDMI_DDC_DATA_INDEX_WRITE;
+		}
+
+		hdmi_write(hdmi_connector, REG_HDMI_DDC_DATA, ddc_data);
+		index++;
+
+		indices[i] = index;
+
+		if (p->flags & I2C_M_RD) {
+			index += p->len;
+		} else {
+			for (j = 0; j < p->len; j++) {
+				ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
+						HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+				hdmi_write(hdmi_connector, REG_HDMI_DDC_DATA, ddc_data);
+				index++;
+			}
+		}
+
+		i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
+				HDMI_I2C_TRANSACTION_REG_RW(
+						(p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
+				HDMI_I2C_TRANSACTION_REG_START;
+
+		if (i == (num - 1))
+			i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
+
+		hdmi_write(hdmi_connector, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
+	}
+
+	INIT_COMPLETION(hdmi_i2c->ddc_xfer);
+
+	/* trigger the transfer: */
+	hdmi_write(hdmi_connector, REG_HDMI_DDC_CTRL,
+			HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
+			HDMI_DDC_CTRL_GO);
+
+	ret = wait_for_completion_interruptible_timeout(
+			&hdmi_i2c->ddc_xfer, HZ/2);
+	if (ret <= 0) {
+		if (ret == 0)
+			ret = -ETIMEDOUT;
+		dev_warn(dev->dev, "DDC timeout: %d\n", ret);
+		DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
+				hdmi_read(hdmi_connector, REG_HDMI_DDC_SW_STATUS),
+				hdmi_read(hdmi_connector, REG_HDMI_DDC_HW_STATUS),
+				hdmi_read(hdmi_connector, REG_HDMI_DDC_INT_CTRL));
+		return ret;
+	}
+
+	/* check for NACK: */
+	ddc_status = hdmi_read(hdmi_connector, REG_HDMI_DDC_SW_STATUS);
+	if (ddc_status & (HDMI_DDC_SW_STATUS_NACK0 | HDMI_DDC_SW_STATUS_NACK1 |
+			HDMI_DDC_SW_STATUS_NACK2 | HDMI_DDC_SW_STATUS_NACK3)) {
+		DBG("ddc_status=%08x", ddc_status);
+		hdmi_write(hdmi_connector, REG_HDMI_DDC_CTRL,
+				HDMI_DDC_CTRL_SW_STATUS_RESET);
+
+		/* msm fbdev driver does this only on last retry: */
+		hdmi_write(hdmi_connector, REG_HDMI_DDC_CTRL,
+				HDMI_DDC_CTRL_SOFT_RESET);
+	}
+
+	/* read back results of any read transactions: */
+	for (i = 0; i < num; i++) {
+		struct i2c_msg *p = &msgs[i];
+
+		if (!(p->flags & I2C_M_RD))
+			continue;
+
+		ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
+				HDMI_DDC_DATA_INDEX(indices[i]) |
+				HDMI_DDC_DATA_INDEX_WRITE;
+
+		hdmi_write(hdmi_connector, REG_HDMI_DDC_DATA, ddc_data);
+
+		/* discard first byte: */
+		hdmi_read(hdmi_connector, REG_HDMI_DDC_DATA);
+
+		for (j = 0; j < p->len; j++) {
+			ddc_data = hdmi_read(hdmi_connector, REG_HDMI_DDC_DATA);
+			p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
+		}
+	}
+
+	return num;
+}
+
+static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm hdmi_i2c_algorithm = {
+	.master_xfer	= hdmi_i2c_xfer,
+	.functionality	= hdmi_i2c_func,
+};
+
+void hdmi_i2c_irq(struct i2c_adapter *i2c)
+{
+	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+	struct hdmi_connector *hdmi_connector = hdmi_i2c->hdmi_connector;
+	uint32_t ddc_int_ctrl;
+
+	ddc_int_ctrl = hdmi_read(hdmi_connector, REG_HDMI_DDC_INT_CTRL);
+
+	if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
+			(ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
+		hdmi_write(hdmi_connector, REG_HDMI_DDC_INT_CTRL,
+				HDMI_DDC_INT_CTRL_SW_DONE_ACK);
+		complete(&hdmi_i2c->ddc_xfer);
+	}
+}
+
+void hdmi_i2c_destroy(struct i2c_adapter *i2c)
+{
+	struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+	i2c_del_adapter(i2c);
+	kfree(hdmi_i2c);
+}
+
+struct i2c_adapter *hdmi_i2c_init(struct hdmi_connector *hdmi_connector)
+{
+	struct drm_device *dev = hdmi_connector->base.base.dev;
+	struct hdmi_i2c_adapter *hdmi_i2c;
+	struct i2c_adapter *i2c = NULL;
+	int ret;
+
+	hdmi_i2c = kzalloc(sizeof(struct hdmi_i2c_adapter), GFP_KERNEL);
+	if (!hdmi_i2c) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	i2c = &hdmi_i2c->base;
+
+	hdmi_i2c->hdmi_connector = hdmi_connector;
+	init_completion(&hdmi_i2c->ddc_xfer);
+
+	i2c->owner = THIS_MODULE;
+	i2c->class = I2C_CLASS_DDC;
+	snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
+	i2c->dev.parent = &hdmi_connector->pdev->dev;
+	i2c->algo = &hdmi_i2c_algorithm;
+
+	ret = i2c_add_adapter(i2c);
+	if (ret) {
+		dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
+		goto fail;
+	}
+
+	return i2c;
+
+fail:
+	if (i2c)
+		hdmi_i2c_destroy(i2c);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 0000000..7883b26
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,140 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi_connector.h"
+
+struct hdmi_phy_8960 {
+	struct hdmi_phy base;
+	struct hdmi_connector *hdmi_connector;
+};
+#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+	kfree(phy_8960);
+}
+
+static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+	struct hdmi_connector *hdmi_connector = phy_8960->hdmi_connector;
+	unsigned int val;
+
+	val = hdmi_read(hdmi_connector, REG_HDMI_PHY_CTRL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull low */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull low */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+
+	msleep(100);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull high */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+		/* pull high */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	}
+}
+
+static void hdmi_phy_8960_powerup(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+	struct hdmi_connector *hdmi_connector = phy_8960->hdmi_connector;
+
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG0, 0x1b);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG1, 0xf2);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG4, 0x00);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG5, 0x00);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG6, 0x00);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG7, 0x00);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG8, 0x00);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG9, 0x00);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG10, 0x00);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG11, 0x00);
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG3, 0x20);
+}
+
+static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+	struct hdmi_connector *hdmi_connector = phy_8960->hdmi_connector;
+
+	hdmi_write(hdmi_connector, REG_HDMI_8960_PHY_REG2, 0x7f);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
+		.destroy = hdmi_phy_8960_destroy,
+		.reset = hdmi_phy_8960_reset,
+		.powerup = hdmi_phy_8960_powerup,
+		.powerdown = hdmi_phy_8960_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8960_init(struct hdmi_connector *hdmi_connector)
+{
+	struct hdmi_phy_8960 *phy_8960;
+	struct hdmi_phy *phy = NULL;
+	int ret;
+
+	phy_8960 = kzalloc(sizeof(struct hdmi_phy_8960), GFP_KERNEL);
+	if (!phy_8960) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phy = &phy_8960->base;
+
+	phy->funcs = &hdmi_phy_8960_funcs;
+
+	phy_8960->hdmi_connector = hdmi_connector;
+
+	return phy;
+
+fail:
+	if (phy)
+		hdmi_phy_8960_destroy(phy);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 0000000..e005818
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,215 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi_connector.h"
+
+struct hdmi_phy_8x60 {
+	struct hdmi_phy base;
+	struct hdmi_connector *hdmi_connector;
+};
+#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
+
+static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+	kfree(phy_8x60);
+}
+
+static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+	struct hdmi_connector *hdmi_connector = phy_8x60->hdmi_connector;
+	unsigned int val;
+
+	val = hdmi_read(hdmi_connector, REG_HDMI_PHY_CTRL);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull low */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull high */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	}
+
+	msleep(100);
+
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+		/* pull high */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	} else {
+		/* pull low */
+		hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	}
+}
+
+static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+	struct hdmi_connector *hdmi_connector = phy_8x60->hdmi_connector;
+
+	/* De-serializer delay D/C for non-lbk mode: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG0,
+			HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
+
+	if (1 /*?? video_format == HDMI_VFRMT_720x480p60_16_9*/) {
+		hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG1,
+				HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+				HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
+	} else {
+		/* If the freq. is less than 120MHz, use low gain 0
+		 * for board with termination
+		 */
+		hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG1,
+				HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+				HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
+	}
+
+	/* No matter what, start from the power down mode: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG2,
+			HDMI_8x60_PHY_REG2_PD_PWRGEN |
+			HDMI_8x60_PHY_REG2_PD_PLL |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+			HDMI_8x60_PHY_REG2_PD_DESER);
+
+	/* Turn PowerGen on: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG2,
+			HDMI_8x60_PHY_REG2_PD_PLL |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+			HDMI_8x60_PHY_REG2_PD_DESER);
+
+	/* Turn PLL power on: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG2,
+			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+			HDMI_8x60_PHY_REG2_PD_DESER);
+
+	/* Write to HIGH after PLL power down de-assert: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG3,
+			HDMI_8x60_PHY_REG3_PLL_ENABLE);
+
+	/* ASIC power on; PHY REG9 = 0 */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG9, 0);
+
+	/* Enable PLL lock detect, PLL lock det will go high after lock
+	 * Enable the re-time logic
+	 */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG12,
+			HDMI_8x60_PHY_REG12_RETIMING_EN |
+			HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
+
+	/* Drivers are on: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG2,
+			HDMI_8x60_PHY_REG2_PD_DESER);
+
+	/* If the RX detector is needed: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG2,
+			HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+			HDMI_8x60_PHY_REG2_PD_DESER);
+
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG4, 0);
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG5, 0);
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG6, 0);
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG7, 0);
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG8, 0);
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG9, 0);
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG10, 0);
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG11, 0);
+
+	/* If we want to use lock enable based on counting: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG12,
+			HDMI_8x60_PHY_REG12_RETIMING_EN |
+			HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
+			HDMI_8x60_PHY_REG12_FORCE_LOCK);
+}
+
+static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
+{
+	struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+	struct hdmi_connector *hdmi_connector = phy_8x60->hdmi_connector;
+
+	/* Assert RESET PHY from controller */
+	hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL,
+			HDMI_PHY_CTRL_SW_RESET);
+	udelay(10);
+	/* De-assert RESET PHY from controller */
+	hdmi_write(hdmi_connector, REG_HDMI_PHY_CTRL, 0);
+	/* Turn off Driver */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG2,
+			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+			HDMI_8x60_PHY_REG2_PD_DESER);
+	udelay(10);
+	/* Disable PLL */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG3, 0);
+	/* Power down PHY, but keep RX-sense: */
+	hdmi_write(hdmi_connector, REG_HDMI_8x60_PHY_REG2,
+			HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+			HDMI_8x60_PHY_REG2_PD_PWRGEN |
+			HDMI_8x60_PHY_REG2_PD_PLL |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+			HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+			HDMI_8x60_PHY_REG2_PD_DESER);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
+		.destroy = hdmi_phy_8x60_destroy,
+		.reset = hdmi_phy_8x60_reset,
+		.powerup = hdmi_phy_8x60_powerup,
+		.powerdown = hdmi_phy_8x60_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi_connector *hdmi_connector)
+{
+	struct hdmi_phy_8x60 *phy_8x60;
+	struct hdmi_phy *phy = NULL;
+	int ret;
+
+	phy_8x60 = kzalloc(sizeof(struct hdmi_phy_8x60), GFP_KERNEL);
+	if (!phy_8x60) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phy = &phy_8x60->base;
+
+	phy->funcs = &hdmi_phy_8x60_funcs;
+
+	phy_8x60->hdmi_connector = hdmi_connector;
+
+	return phy;
+
+fail:
+	if (phy)
+		hdmi_phy_8x60_destroy(phy);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
new file mode 100644
index 0000000..5ede630
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -0,0 +1,440 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kfifo.h>
+
+#include "mdp4_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+struct mdp4_crtc {
+	struct drm_crtc base;
+	char name[8];
+	struct drm_plane *plane;
+	int id;
+	int ovlp;
+	enum mdp4_dma dma;
+	bool enabled;
+
+	/* if there is a pending flip, these will be non-null: */
+	struct drm_pending_vblank_event *event;
+	struct drm_framebuffer *old_fb;
+
+	/* we can't free fb from irq, so deferred to worker: */
+	DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
+	struct work_struct work;
+
+	struct mdp4_irq irq;
+};
+#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
+
+static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv = crtc->dev->dev_private;
+	return to_mdp4_kms(priv->kms);
+}
+
+static void update_fb(struct mdp4_crtc *mdp4_crtc,
+		struct drm_framebuffer *new_fb, struct drm_framebuffer *old_fb)
+{
+	if (WARN_ON(mdp4_crtc->old_fb))
+		drm_framebuffer_unreference(mdp4_crtc->old_fb);
+
+	/* keep track of the previously scanned out buffer to unref: */
+	mdp4_crtc->old_fb = old_fb;
+
+	/* grab reference to incoming scanout fb: */
+	drm_framebuffer_reference(new_fb);
+	mdp4_crtc->base.fb = new_fb;
+}
+
+static void unref_worker(struct work_struct *work)
+{
+	struct mdp4_crtc *mdp4_crtc = container_of(work, struct mdp4_crtc, work);
+	struct drm_device *dev = mdp4_crtc->base.dev;
+	struct drm_framebuffer *fb;
+
+	mutex_lock(&dev->mode_config.mutex);
+	while (kfifo_get(&mdp4_crtc->unref_fifo, &fb))
+		drm_framebuffer_unreference(fb);
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void mdp4_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+	mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
+	drm_crtc_cleanup(crtc);
+
+	WARN_ON(!kfifo_is_empty(&mdp4_crtc->unref_fifo));
+	kfifo_free(&mdp4_crtc->unref_fifo);
+
+	kfree(mdp4_crtc);
+}
+
+static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+	DBG("%s: mode=%d", mdp4_crtc->name, mode);
+
+	if (enabled == mdp4_crtc->enabled)
+		return;
+
+	if (enabled)
+		mdp4_irq_register(mdp4_kms, &mdp4_crtc->irq);
+	else
+		mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->irq);
+
+	mdp4_crtc->enabled = enabled;
+}
+
+static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	int i, ovlp = mdp4_crtc->ovlp;
+
+	/*
+	 * This probably would also need to be triggered by any attached
+	 * plane when it changes.. for now since we are only using a single
+	 * private plane, the configuration is hard-coded:
+	 */
+
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
+
+	for (i = 0; i < 4; i++) {
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
+				MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+				MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_CONST));
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
+		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
+	}
+
+	/* XXX hard-coded for pipe2 (RGB1).. we need to figure this out from
+	 * which plane(s) are attached..
+	 */
+	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG,
+			MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
+			MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+}
+
+static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode,
+		int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	enum mdp4_dma dma = mdp4_crtc->dma;
+	int ret, ovlp = mdp4_crtc->ovlp;
+
+	mode = adjusted_mode;
+
+	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			mdp4_crtc->name, mode->base.id, mode->name,
+			mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal,
+			mode->type, mode->flags);
+
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
+			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
+			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
+
+	/* take data from pipe: */
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
+			crtc->fb->pitches[0]);
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
+			MDP4_DMA_DST_SIZE_WIDTH(0) |
+			MDP4_DMA_DST_SIZE_HEIGHT(0));
+
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
+			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
+			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
+			crtc->fb->pitches[0]);
+
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
+
+	update_fb(mdp4_crtc, crtc->fb, old_fb);
+
+	ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16);
+	if (ret) {
+		dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+				mdp4_crtc->name, ret);
+		return ret;
+	}
+
+	blend_setup(crtc);
+
+	/*
+	 * I believe this is the *output* format to the encoder.. so possibly
+	 * we need to find this out from the encoder instead:
+	 */
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(dma),
+			MDP4_DMA_CONFIG_R_BPC(DBPC8) |
+			MDP4_DMA_CONFIG_G_BPC(DBPC8) |
+			MDP4_DMA_CONFIG_B_BPC(DBPC8) |
+			MDP4_DMA_CONFIG_PACK(0x21));
+
+	if (dma == DMA_E) {
+		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
+		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
+		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
+	}
+
+	return 0;
+}
+
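+/* Set the OVERLAY_FLUSH bits for our pipe and overlay (ovlp), so the
+ * shadowed register updates take effect:
+ */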
+static void mdp4_crtc_flush(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	uint32_t flush = 0;
+
+	flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
+	flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void mdp4_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	DBG("%s", mdp4_crtc->name);
+	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp4_crtc_commit(struct drm_crtc *crtc)
+{
+	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	mdp4_crtc_flush(crtc);
+}
+
+static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+		struct drm_framebuffer *old_fb)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct drm_plane *plane = mdp4_crtc->plane;
+	struct drm_display_mode *mode = &crtc->mode;
+
+	update_fb(mdp4_crtc, crtc->fb, old_fb);
+
+	return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+			0, 0, mode->hdisplay, mode->vdisplay,
+			x << 16, y << 16,
+			mode->hdisplay << 16, mode->vdisplay << 16);
+}
+
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
+		struct drm_framebuffer *new_fb,
+		struct drm_pending_vblank_event *event)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+
+	if (mdp4_crtc->old_fb) {
+		dev_err(dev->dev, "already pending page flip!\n");
+		return -EBUSY;
+	}
+
+	mdp4_crtc->event = event;
+
+	update_fb(mdp4_crtc, new_fb, crtc->fb);
+
+	mdp4_plane_set_scanout(mdp4_crtc->plane, new_fb);
+	mdp4_crtc_flush(crtc);
+
+	return 0;
+}
+
+static int mdp4_crtc_set_property(struct drm_crtc *crtc,
+		struct drm_property *property, uint64_t val)
+{
+	// XXX
+	return -EINVAL;
+}
+
+static const struct drm_crtc_funcs mdp4_crtc_funcs = {
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = mdp4_crtc_destroy,
+	.page_flip = mdp4_crtc_page_flip,
+	.set_property = mdp4_crtc_set_property,
+};
+
+static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
+	.dpms = mdp4_crtc_dpms,
+	.mode_fixup = mdp4_crtc_mode_fixup,
+	.mode_set = mdp4_crtc_mode_set,
+	.prepare = mdp4_crtc_prepare,
+	.commit = mdp4_crtc_commit,
+	.mode_set_base = mdp4_crtc_mode_set_base,
+	.load_lut = mdp4_crtc_load_lut,
+};
+
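+/* Called when this crtc's irq (dma-done/vsync) fires: queue the old
+ * scanout fb for deferred unref, and send any pending page-flip event:
+ */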
+static void mdp4_crtc_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, irq);
+	struct drm_device *dev = mdp4_crtc->base.dev;
+	struct drm_pending_vblank_event *event;
+	unsigned long flags;
+
+	if (mdp4_crtc->old_fb) {
+		if (kfifo_put(&mdp4_crtc->unref_fifo,
+				(const struct drm_framebuffer **)&mdp4_crtc->old_fb)) {
+			struct msm_drm_private *priv = dev->dev_private;
+			queue_work(priv->wq, &mdp4_crtc->work);
+		} else {
+			dev_err(dev->dev, "unref fifo full!\n");
+			drm_framebuffer_unreference(mdp4_crtc->old_fb);
+		}
+		mdp4_crtc->old_fb = NULL;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = mdp4_crtc->event;
+	mdp4_crtc->event = NULL;
+	if (event)
+		drm_send_vblank_event(dev, mdp4_crtc->id, event);
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	return mdp4_crtc->irq.irqmask;
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
+{
+	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+	struct mdp4_kms *mdp4_kms = get_kms(crtc);
+	uint32_t intf_sel;
+
+	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
+
+	switch (mdp4_crtc->dma) {
+	case DMA_P:
+		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
+		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
+		break;
+	case DMA_S:
+		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
+		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
+		break;
+	case DMA_E:
+		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
+		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
+		break;
+	}
+
+	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
+
+	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
+}
+
+static const char *dma_names[] = {
+		"DMA_P", "DMA_S", "DMA_E",
+};
+
+/* initialize crtc */
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+		struct drm_plane *plane, int id, int ovlp_id,
+		enum mdp4_dma dma_id)
+{
+	struct drm_crtc *crtc = NULL;
+	struct mdp4_crtc *mdp4_crtc;
+	int ret;
+
+	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
+	if (!mdp4_crtc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	crtc = &mdp4_crtc->base;
+
+	mdp4_crtc->plane = plane;
+	mdp4_crtc->plane->crtc = crtc;
+
+	mdp4_crtc->ovlp = ovlp_id;
+	mdp4_crtc->dma = dma_id;
+
+	mdp4_crtc->irq.irqmask = dma2irq(mdp4_crtc->dma);
+	mdp4_crtc->irq.irq = mdp4_crtc_irq;
+
+	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
+			dma_names[dma_id], ovlp_id);
+
+	ret = kfifo_alloc(&mdp4_crtc->unref_fifo, 16, GFP_KERNEL);
+	if (ret) {
+		dev_err(dev->dev, "could not allocate unref FIFO\n");
+		goto fail;
+	}
+
+	INIT_WORK(&mdp4_crtc->work, unref_worker);
+
+	drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
+	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+
+	mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+
+	return crtc;
+
+fail:
+	if (crtc)
+		mdp4_crtc_destroy(crtc);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 0000000..c5ce94d
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,306 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <mach/clk.h>
+
+#include "mdp4_kms.h"
+#include "msm_connector.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+struct mdp4_dtv_encoder {
+	struct drm_encoder base;
+	struct clk *src_clk;
+	struct clk *hdmi_clk;
+	struct clk *mdp_clk;
+	unsigned long int pixclock;
+	bool enabled;
+	uint32_t bsc;
+};
+#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+	struct msm_drm_private *priv = encoder->dev->dev_private;
+	return to_mdp4_kms(priv->kms);
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+/* not ironically named at all.. no, really.. */
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+	struct drm_device *dev = mdp4_dtv_encoder->base.dev;
+	struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
+
+	if (!dtv_pdata) {
+		dev_err(dev->dev, "could not find dtv pdata\n");
+		return;
+	}
+
+	if (dtv_pdata->bus_scale_table) {
+		mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
+				dtv_pdata->bus_scale_table);
+		DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
+		DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
+		if (dtv_pdata->lcdc_power_save)
+			dtv_pdata->lcdc_power_save(1);
+	}
+}
+
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+	if (mdp4_dtv_encoder->bsc) {
+		msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
+		mdp4_dtv_encoder->bsc = 0;
+	}
+}
+
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
+{
+	if (mdp4_dtv_encoder->bsc) {
+		DBG("set bus scaling: %d", idx);
+		msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
+	}
+}
+#else
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
+#endif
+
+static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+	bs_fini(mdp4_dtv_encoder);
+	drm_encoder_cleanup(encoder);
+	kfree(mdp4_dtv_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
+	.destroy = mdp4_dtv_encoder_destroy,
+};
+
+static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+	struct msm_connector *msm_connector = get_connector(encoder);
+	struct mdp4_kms *mdp4_kms = get_kms(encoder);
+	bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+	DBG("mode=%d", mode);
+
+	if (enabled == mdp4_dtv_encoder->enabled)
+		return;
+
+	if (enabled) {
+		unsigned long pc = mdp4_dtv_encoder->pixclock;
+		int ret;
+
+		bs_set(mdp4_dtv_encoder, 1);
+
+		DBG("setting src_clk=%lu", pc);
+
+		ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+		if (ret)
+			dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
+		ret = clk_prepare_enable(mdp4_dtv_encoder->src_clk);
+		if (ret)
+			dev_err(dev->dev, "failed to enable src_clk: %d\n", ret);
+		ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+		if (ret)
+			dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+		ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
+		if (ret)
+			dev_err(dev->dev, "failed to enable mdp_clk: %d\n", ret);
+
+		if (msm_connector)
+			msm_connector->funcs->dpms(msm_connector, mode);
+
+		mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
+	} else {
+		mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+
+		/*
+		 * Wait for a vsync so we know the ENABLE=0 has latched before
+		 * the (connector) source of the vsyncs gets disabled.
+		 * Otherwise, if we re-enable before the disable latches, we
+		 * end up in a funny state where some of the settings for the
+		 * new modeset (such as the new scanout buffer) don't latch
+		 * properly..
+		 */
+		mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
+
+		if (msm_connector)
+			msm_connector->funcs->dpms(msm_connector, mode);
+
+		clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
+		clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
+		clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
+
+		bs_set(mdp4_dtv_encoder, 0);
+	}
+
+	mdp4_dtv_encoder->enabled = enabled;
+}
+
+static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+	struct msm_connector *msm_connector = get_connector(encoder);
+	struct mdp4_kms *mdp4_kms = get_kms(encoder);
+	uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+	uint32_t display_v_start, display_v_end;
+	uint32_t hsync_start_x, hsync_end_x;
+
+	mode = adjusted_mode;
+
+	DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+			mode->base.id, mode->name,
+			mode->vrefresh, mode->clock,
+			mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal,
+			mode->vdisplay, mode->vsync_start,
+			mode->vsync_end, mode->vtotal,
+			mode->type, mode->flags);
+
+	mdp4_dtv_encoder->pixclock = mode->clock * 1000;
+
+	DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
+
+	ctrl_pol = 0;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
+	/* probably need to get DATA_EN polarity from panel.. */
+
+	dtv_hsync_skew = 0;  /* get this from panel? */
+
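+	/*
+	 * The active display start/end values appear to be programmed in
+	 * dotclocks counted from the leading edge of hsync/vsync, which is
+	 * why the vertical values below are scaled by htotal:
+	 */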
+	hsync_start_x = (mode->htotal - mode->hsync_start);
+	hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+	vsync_period = mode->vtotal * mode->htotal;
+	vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+	display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+	display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
+			MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+			MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
+			MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
+			MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
+			MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
+			MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
+			MDP4_DTV_ACTIVE_HCTL_START(0) |
+			MDP4_DTV_ACTIVE_HCTL_END(0));
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
+
+	if (msm_connector)
+		msm_connector->funcs->mode_set(msm_connector, mode);
+}
+
+static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
+{
+	mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
+{
+	mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
+	mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
+	.dpms = mdp4_dtv_encoder_dpms,
+	.mode_fixup = mdp4_dtv_encoder_mode_fixup,
+	.mode_set = mdp4_dtv_encoder_mode_set,
+	.prepare = mdp4_dtv_encoder_prepare,
+	.commit = mdp4_dtv_encoder_commit,
+};
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
+{
+	struct drm_encoder *encoder = NULL;
+	struct mdp4_dtv_encoder *mdp4_dtv_encoder;
+	int ret;
+
+	mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
+	if (!mdp4_dtv_encoder) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	encoder = &mdp4_dtv_encoder->base;
+
+	drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+	drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
+
+	mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
+	if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
+		dev_err(dev->dev, "failed to get src_clk\n");
+		ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
+		goto fail;
+	}
+
+	mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
+	if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
+		dev_err(dev->dev, "failed to get hdmi_clk\n");
+		ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
+		goto fail;
+	}
+
+	mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+	if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
+		dev_err(dev->dev, "failed to get mdp_clk\n");
+		ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
+		goto fail;
+	}
+
+	bs_init(mdp4_dtv_encoder);
+
+	return encoder;
+
+fail:
+	if (encoder)
+		mdp4_dtv_encoder_destroy(encoder);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
new file mode 100644
index 0000000..e0d8d56
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
@@ -0,0 +1,194 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+
+struct mdp4_irq_wait {
+	struct mdp4_irq irq;
+	int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
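+/* (Re)program INTR_ENABLE with the union of the userspace vblank mask
+ * and all currently registered irqs.  Caller must hold list_lock:
+ */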
+static void update_irq(struct mdp4_kms *mdp4_kms)
+{
+	struct mdp4_irq *irq;
+	uint32_t irqmask = mdp4_kms->vblank_mask;
+
+	BUG_ON(!spin_is_locked(&list_lock));
+
+	list_for_each_entry(irq, &mdp4_kms->irq_list, node)
+		irqmask |= irq->irqmask;
+
+	DBG("irqmask=%08x", irqmask);
+
+	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+	DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
+
+	INIT_LIST_HEAD(&mdp4_kms->irq_list);
+
+	error_handler->irq = mdp4_irq_error_handler;
+	error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+			MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+	mdp4_irq_register(mdp4_kms, error_handler);
+
+	return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct drm_device *dev = mdp4_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct mdp4_irq *handler, *n;
+	unsigned long flags;
+	unsigned int id;
+	uint32_t status;
+
+	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+	VERB("status=%08x", status);
+
+	for (id = 0; id < priv->num_crtcs; id++)
+		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+			drm_handle_vblank(dev, id);
+
+	spin_lock_irqsave(&list_lock, flags);
+	list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
+		if (handler->irqmask & status) {
+			spin_unlock_irqrestore(&list_lock, flags);
+			handler->irq(handler, handler->irqmask & status);
+			spin_lock_irqsave(&list_lock, flags);
+		}
+	}
+	spin_unlock_irqrestore(&list_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	unsigned long flags;
+
+	pm_runtime_get_sync(mdp4_kms->dev->dev);
+	spin_lock_irqsave(&list_lock, flags);
+	mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
+	update_irq(mdp4_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+	pm_runtime_put_sync(mdp4_kms->dev->dev);
+
+	return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	unsigned long flags;
+
+	pm_runtime_get_sync(mdp4_kms->dev->dev);
+	spin_lock_irqsave(&list_lock, flags);
+	mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
+	update_irq(mdp4_kms);
+	spin_unlock_irqrestore(&list_lock, flags);
+	pm_runtime_put_sync(mdp4_kms->dev->dev);
+}
+
+static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+	struct mdp4_irq_wait *wait =
+			container_of(irq, struct mdp4_irq_wait, irq);
+	wait->count--;
+	wake_up_all(&wait_event);
+}
+
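+/* Block until the next irq matching 'irqmask' fires.. used, eg, by the
+ * DTV encoder to make sure an ENABLE write has latched before it goes
+ * on to disable clocks:
+ */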
+void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
+{
+	struct mdp4_irq_wait wait = {
+		.irq = {
+			.irq = wait_irq,
+			.irqmask = irqmask,
+		},
+		.count = 1,
+	};
+	mdp4_irq_register(mdp4_kms, &wait.irq);
+	wait_event(wait_event, (wait.count <= 0));
+	mdp4_irq_unregister(mdp4_kms, &wait.irq);
+}
+
+void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
+{
+	unsigned long flags;
+
+	pm_runtime_get_sync(mdp4_kms->dev->dev);
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (!WARN_ON(irq->registered)) {
+		irq->registered = true;
+		list_add(&irq->node, &mdp4_kms->irq_list);
+		update_irq(mdp4_kms);
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+	pm_runtime_put_sync(mdp4_kms->dev->dev);
+}
+
+void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
+{
+	unsigned long flags;
+
+	pm_runtime_get_sync(mdp4_kms->dev->dev);
+	spin_lock_irqsave(&list_lock, flags);
+
+	if (!WARN_ON(!irq->registered)) {
+		irq->registered = false;
+		list_del(&irq->node);
+		update_irq(mdp4_kms);
+	}
+
+	spin_unlock_irqrestore(&list_lock, flags);
+	pm_runtime_put_sync(mdp4_kms->dev->dev);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
new file mode 100644
index 0000000..09ece17
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -0,0 +1,359 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+#include <mach/iommu.h>
+#include <mach/iommu_domains.h>
+
+
+static int mdp4_hw_init(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	struct drm_device *dev = mdp4_kms->dev;
+	uint32_t version, major, minor, dmap_cfg, vg_cfg;
+	unsigned long clk;
+	int ret = 0;
+
+	pm_runtime_get_sync(dev->dev);
+
+	version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+
+	major = FIELD(version, MDP4_VERSION_MAJOR);
+	minor = FIELD(version, MDP4_VERSION_MINOR);
+
+	DBG("found MDP version v%d.%d", major, minor);
+
+	if (major != 4) {
+		dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+				major, minor);
+		ret = -ENXIO;
+		goto out;
+	}
+
+	mdp4_kms->rev = minor;
+
+	if (mdp4_kms->dsi_pll_vdda) {
+		if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
+			ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
+					1200000, 1200000);
+			if (ret) {
+				dev_err(dev->dev,
+					"failed to set dsi_pll_vdda voltage: %d\n", ret);
+				goto out;
+			}
+		}
+	}
+
+	if (mdp4_kms->dsi_pll_vddio) {
+		if (mdp4_kms->rev == 2) {
+			ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
+					1800000, 1800000);
+			if (ret) {
+				dev_err(dev->dev,
+					"failed to set dsi_pll_vddio voltage: %d\n", ret);
+				goto out;
+			}
+		}
+	}
+
+	if (mdp4_kms->rev > 1) {
+		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
+		mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
+	}
+
+	/* make sure things are off: */
+	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+
+	mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
+
+	/* max read pending cmd config, 3 pending requests: */
+	mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
+
+	clk = clk_get_rate(mdp4_kms->clk);
+
+	if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
+		dmap_cfg = 0x47;     /* 16 bytes-burst x 8 req */
+		vg_cfg = 0x47;       /* 16 bytes-burst x 8 req */
+	} else {
+		dmap_cfg = 0x27;     /* 8 bytes-burst x 8 req */
+		vg_cfg = 0x43;       /* 16 bytes-burst x 4 req */
+	}
+
+	DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
+
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
+
+	if (mdp4_kms->rev >= 2)
+		mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
+
+	/* disable CSC matrix / YUV by default: */
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
+	mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
+	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
+
+	if (mdp4_kms->rev > 1)
+		mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
+
+out:
+	pm_runtime_put_sync(dev->dev);
+
+	return ret;
+}
+
+static int mdp4_pm_suspend(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+
+	DBG("");
+
+	clk_disable_unprepare(mdp4_kms->clk);
+	if (mdp4_kms->pclk)
+		clk_disable_unprepare(mdp4_kms->pclk);
+	clk_disable_unprepare(mdp4_kms->lut_clk);
+
+	return 0;
+}
+
+static int mdp4_pm_resume(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+
+	DBG("");
+
+	clk_prepare_enable(mdp4_kms->clk);
+	if (mdp4_kms->pclk)
+		clk_prepare_enable(mdp4_kms->pclk);
+	clk_prepare_enable(mdp4_kms->lut_clk);
+
+	return 0;
+}
+
+static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+	/* cancel or wait for pending page flips.. */
+	DBG(""); // XXX
+}
+
+static void mdp4_destroy(struct msm_kms *kms)
+{
+	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+	kfree(mdp4_kms);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+		.hw_init         = mdp4_hw_init,
+		.pm_suspend      = mdp4_pm_suspend,
+		.pm_resume       = mdp4_pm_resume,
+		.preclose        = mdp4_preclose,
+		.irq_preinstall  = mdp4_irq_preinstall,
+		.irq_postinstall = mdp4_irq_postinstall,
+		.irq_uninstall   = mdp4_irq_uninstall,
+		.irq             = mdp4_irq,
+		.enable_vblank   = mdp4_enable_vblank,
+		.disable_vblank  = mdp4_disable_vblank,
+		.destroy         = mdp4_destroy,
+};
+
+static int modeset_init(struct mdp4_kms *mdp4_kms)
+{
+	struct drm_device *dev = mdp4_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	int ret;
+
+	/*
+	 * NOTE: this is a bit simplistic until we add support
+	 * for more than just RGB1->DMA_E->DTV->HDMI
+	 */
+
+	/* the CRTCs get constructed with a private plane: */
+	plane = mdp4_plane_init(dev, RGB1, true);
+	if (IS_ERR(plane)) {
+		dev_err(dev->dev, "failed to construct plane for RGB1\n");
+		ret = PTR_ERR(plane);
+		goto fail;
+	}
+
+	crtc  = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
+	if (IS_ERR(crtc)) {
+		dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
+		ret = PTR_ERR(crtc);
+		goto fail;
+	}
+	priv->crtcs[priv->num_crtcs++] = crtc;
+
+	encoder = mdp4_dtv_encoder_init(dev);
+	if (IS_ERR(encoder)) {
+		dev_err(dev->dev, "failed to construct DTV encoder\n");
+		ret = PTR_ERR(encoder);
+		goto fail;
+	}
+	encoder->possible_crtcs = 0x1;     /* DTV can be hooked to DMA_E */
+	priv->encoders[priv->num_encoders++] = encoder;
+
+	connector = hdmi_connector_init(dev, encoder);
+	if (IS_ERR(connector)) {
+		dev_err(dev->dev, "failed to construct HDMI connector\n");
+		ret = PTR_ERR(connector);
+		goto fail;
+	}
+	priv->connectors[priv->num_connectors++] = connector;
+
+	return 0;
+
+fail:
+	return ret;
+}
+
+static const char *iommu_ports[] = {
+		"mdp_port0_cb0", "mdp_port1_cb0",
+};
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct resource *res;
+	struct mdp4_kms *mdp4_kms;
+	struct msm_kms *kms = NULL;
+	struct iommu_domain *iommu;
+	uint32_t max_clk;
+	int i, ret;
+
+	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
+	if (!mdp4_kms) {
+		dev_err(dev->dev, "failed to allocate kms\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	kms = &mdp4_kms->base;
+	kms->funcs = &kms_funcs;
+
+	mdp4_kms->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev->dev, "failed to get memory resource\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	mdp4_kms->mmio = msm_ioremap(&pdev->dev,
+			res->start, resource_size(res), "MDP4");
+	if (!mdp4_kms->mmio) {
+		dev_err(dev->dev, "failed to ioremap\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
+	if (IS_ERR(mdp4_kms->dsi_pll_vdda))
+		mdp4_kms->dsi_pll_vdda = NULL;
+
+	mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
+	if (IS_ERR(mdp4_kms->dsi_pll_vddio))
+		mdp4_kms->dsi_pll_vddio = NULL;
+
+	mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+	if (IS_ERR(mdp4_kms->vdd))
+		mdp4_kms->vdd = NULL;
+
+	if (mdp4_kms->vdd)
+		regulator_enable(mdp4_kms->vdd);
+
+	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
+	if (IS_ERR(mdp4_kms->clk)) {
+		dev_err(dev->dev, "failed to get core_clk\n");
+		ret = PTR_ERR(mdp4_kms->clk);
+		goto fail;
+	}
+
+	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(mdp4_kms->pclk))
+		mdp4_kms->pclk = NULL;
+
+	// XXX if (rev >= MDP_REV_42) { ???
+	mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+	if (IS_ERR(mdp4_kms->lut_clk)) {
+		dev_err(dev->dev, "failed to get lut_clk\n");
+		ret = PTR_ERR(mdp4_kms->lut_clk);
+		goto fail;
+	}
+
+	if (cpu_is_apq8064())
+		max_clk = 266667000;
+	else
+		max_clk = 200000000;
+
+	clk_set_rate(mdp4_kms->clk, max_clk);
+	clk_set_rate(mdp4_kms->lut_clk, max_clk);
+
+	iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
+	if (!iommu) {
+		dev_err(dev->dev, "failed to get mdp4 iommu\n");
+		ret = -ENXIO;
+		goto fail;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(iommu_ports); i++) {
+		struct device *ctx = msm_iommu_get_ctx(iommu_ports[i]);
+		if (!ctx)
+			continue;
+		ret = iommu_attach_device(iommu, ctx);
+		if (ret) {
+			dev_warn(dev->dev, "could not attach iommu to %s", iommu_ports[i]);
+			goto fail;
+		}
+	}
+
+	mdp4_kms->id = msm_register_iommu(dev, iommu);
+	if (mdp4_kms->id < 0) {
+		ret = mdp4_kms->id;
+		dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
+		goto fail;
+	}
+
+	ret = modeset_init(mdp4_kms);
+	if (ret) {
+		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+		goto fail;
+	}
+
+	return kms;
+
+fail:
+	if (kms)
+		mdp4_destroy(kms);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
new file mode 100644
index 0000000..c2942e8
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -0,0 +1,161 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP4_KMS_H__
+#define __MDP4_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "mdp4.xml.h"
+
+
+/* For transiently registering for different MDP4 irqs that various parts
+ * of the KMS code need during setup/configuration.  These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping-related irq usage.
+ */
+struct mdp4_irq {
+	struct list_head node;
+	uint32_t irqmask;
+	bool registered;
+	void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
+};
+
+struct mdp4_kms {
+	struct msm_kms base;
+
+	struct drm_device *dev;
+
+	int rev;
+
+	/* mapper-id used to request GEM buffer mapped for scanout: */
+	int id;
+
+	void __iomem *mmio;
+
+	struct regulator *dsi_pll_vdda;
+	struct regulator *dsi_pll_vddio;
+	struct regulator *vdd;
+
+	struct clk *clk;
+	struct clk *pclk;
+	struct clk *lut_clk;
+
+	/* irq handling: */
+	struct list_head irq_list;    /* list of mdp4_irq */
+	uint32_t vblank_mask;         /* irq bits set for userspace vblank */
+	struct mdp4_irq error_handler;
+};
+#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
+
+static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
+{
+	msm_writel(data, mdp4_kms->mmio + reg);
+}
+
+static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
+{
+	return msm_readl(mdp4_kms->mmio + reg);
+}
+
+static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
+{
+	switch (pipe) {
+	case VG1:      return MDP4_OVERLAY_FLUSH_VG1;
+	case VG2:      return MDP4_OVERLAY_FLUSH_VG2;
+	case RGB1:     return MDP4_OVERLAY_FLUSH_RGB1;
+	case RGB2:     return MDP4_OVERLAY_FLUSH_RGB2;
+	default:       return 0;
+	}
+}
+
+static inline uint32_t ovlp2flush(int ovlp)
+{
+	switch (ovlp) {
+	case 0:        return MDP4_OVERLAY_FLUSH_OVLP0;
+	case 1:        return MDP4_OVERLAY_FLUSH_OVLP1;
+	default:       return 0;
+	}
+}
+
+static inline uint32_t dma2irq(enum mdp4_dma dma)
+{
+	switch (dma) {
+	case DMA_P:    return MDP4_IRQ_DMA_P_DONE | MDP4_IRQ_PRIMARY_VSYNC;
+	case DMA_S:    return MDP4_IRQ_DMA_S_DONE;
+	case DMA_E:    return MDP4_IRQ_DMA_E_DONE | MDP4_IRQ_EXTERNAL_VSYNC;
+	default:       return 0;
+	}
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms);
+int mdp4_irq_postinstall(struct msm_kms *kms);
+void mdp4_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp4_irq(struct msm_kms *kms);
+void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
+void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
+void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void mdp4_plane_install_properties(struct drm_plane *plane,
+		struct drm_mode_object *obj);
+void mdp4_plane_set_scanout(struct drm_plane *plane,
+		struct drm_framebuffer *fb);
+int mdp4_plane_mode_set(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h);
+enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+		enum mpd4_pipe pipe_id, bool private_plane);
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+		struct drm_plane *plane, int id, int ovlp_id,
+		enum mdp4_dma dma_id);
+
+struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev);
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
+struct drm_encoder *mdp4_dsi_encoder_init(struct drm_device *dev);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static inline int match_dev_name(struct device *dev, void *data)
+{
+	return !strcmp(dev_name(dev), data);
+}
+/* bus scaling data is associated with extra pointless platform devices,
+ * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
+ * to find their pdata to make the bus-scaling stuff work.
+ */
+static inline void *mdp4_find_pdata(const char *devname)
+{
+	struct device *dev;
+	dev = bus_find_device(&platform_bus_type, NULL,
+			(void *)devname, match_dev_name);
+	return dev ? dev->platform_data : NULL;
+}
+#endif
+
+#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
new file mode 100644
index 0000000..cb52070
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -0,0 +1,241 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+
+struct mdp4_plane {
+	struct drm_plane base;
+	const char *name;
+
+	enum mpd4_pipe pipe;
+
+	uint32_t nformats;
+	uint32_t formats[32];
+
+	bool enabled;
+};
+#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
+
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv = plane->dev->dev_private;
+	return to_mdp4_kms(priv->kms);
+}
+
+static int mdp4_plane_update(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+	mdp4_plane->enabled = true;
+
+	if (plane->fb)
+		drm_framebuffer_unreference(plane->fb);
+
+	drm_framebuffer_reference(fb);
+
+	return mdp4_plane_mode_set(plane, crtc, fb,
+			crtc_x, crtc_y, crtc_w, crtc_h,
+			src_x, src_y, src_w, src_h);
+}
+
+static int mdp4_plane_disable(struct drm_plane *plane)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+	DBG("%s: TODO", mdp4_plane->name); // XXX
+	return 0;
+}
+
+static void mdp4_plane_destroy(struct drm_plane *plane)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+	mdp4_plane_disable(plane);
+	drm_plane_cleanup(plane);
+
+	kfree(mdp4_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp4_plane_install_properties(struct drm_plane *plane,
+		struct drm_mode_object *obj)
+{
+	// XXX
+}
+
+int mdp4_plane_set_property(struct drm_plane *plane,
+		struct drm_property *property, uint64_t val)
+{
+	// XXX
+	return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp4_plane_funcs = {
+		.update_plane = mdp4_plane_update,
+		.disable_plane = mdp4_plane_disable,
+		.destroy = mdp4_plane_destroy,
+		.set_property = mdp4_plane_set_property,
+};
+
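+/* Program the pipe's source strides and scanout address from the fb.
+ * Also used directly by the crtc for page-flips, where only the
+ * scanout address/strides change:
+ */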
+void mdp4_plane_set_scanout(struct drm_plane *plane,
+		struct drm_framebuffer *fb)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+	struct mdp4_kms *mdp4_kms = get_kms(plane);
+	enum mpd4_pipe pipe = mdp4_plane->pipe;
+	uint32_t iova;
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
+			MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+			MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
+			MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+			MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+	msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
+
+	plane->fb = fb;
+}
+
+#define MDP4_VG_PHASE_STEP_DEFAULT	0x20000000
+
+int mdp4_plane_mode_set(struct drm_plane *plane,
+		struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		int crtc_x, int crtc_y,
+		unsigned int crtc_w, unsigned int crtc_h,
+		uint32_t src_x, uint32_t src_y,
+		uint32_t src_w, uint32_t src_h)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+	struct mdp4_kms *mdp4_kms = get_kms(plane);
+	enum mpd4_pipe pipe = mdp4_plane->pipe;
+	uint32_t op_mode = 0;
+	uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
+	uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+
+	/* src values are in Q16 fixed point, convert to integer: */
+	src_x = src_x >> 16;
+	src_y = src_y >> 16;
+	src_w = src_w >> 16;
+	src_h = src_h >> 16;
+
+	if (src_w != crtc_w) {
+		op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
+		/* TODO calc phasex_step */
+	}
+
+	if (src_h != crtc_h) {
+		op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
+		/* TODO calc phasey_step */
+	}
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
+			MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
+			MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
+			MDP4_PIPE_SRC_XY_X(src_x) |
+			MDP4_PIPE_SRC_XY_Y(src_y));
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
+			MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
+			MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
+			MDP4_PIPE_DST_XY_X(crtc_x) |
+			MDP4_PIPE_DST_XY_Y(crtc_y));
+
+	mdp4_plane_set_scanout(plane, fb);
+
+	/* TODO don't hard-code format/unpack: */
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
+			MDP4_PIPE_SRC_FORMAT_A_BPC(BPC8) |
+			MDP4_PIPE_SRC_FORMAT_R_BPC(BPC8) |
+			MDP4_PIPE_SRC_FORMAT_G_BPC(BPC8) |
+			MDP4_PIPE_SRC_FORMAT_B_BPC(BPC8) |
+			MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE |
+			MDP4_PIPE_SRC_FORMAT_CPP(4-1) |
+			MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(4-1) |
+			MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT);
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
+			MDP4_PIPE_SRC_UNPACK_ELEM0(1) |
+			MDP4_PIPE_SRC_UNPACK_ELEM1(0) |
+			MDP4_PIPE_SRC_UNPACK_ELEM2(2) |
+			MDP4_PIPE_SRC_UNPACK_ELEM3(3));
+
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
+	mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
+
+	plane->crtc = crtc;
+
+	return 0;
+}
+
+static const char *pipe_names[] = {
+		"VG1", "VG2",
+		"RGB1", "RGB2", "RGB3",
+		"VG3", "VG4",
+};
+
+enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+{
+	struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+	return mdp4_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+		enum mpd4_pipe pipe_id, bool private_plane)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_plane *plane = NULL;
+	struct mdp4_plane *mdp4_plane;
+	int ret;
+
+	mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
+	if (!mdp4_plane) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	plane = &mdp4_plane->base;
+
+	mdp4_plane->pipe = pipe_id;
+	mdp4_plane->name = pipe_names[pipe_id];
+
+	drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
+			mdp4_plane->formats, mdp4_plane->nformats, private_plane);
+
+	mdp4_plane_install_properties(plane, &plane->base);
+
+	return plane;
+
+fail:
+	if (plane)
+		mdp4_plane_destroy(plane);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_connector.c b/drivers/gpu/drm/msm/msm_connector.c
new file mode 100644
index 0000000..aeea887
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_connector.c
@@ -0,0 +1,34 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_connector.h"
+
+void msm_connector_init(struct msm_connector *connector,
+		const struct msm_connector_funcs *funcs,
+		struct drm_encoder *encoder)
+{
+	connector->funcs = funcs;
+	connector->encoder = encoder;
+}
+
+struct drm_encoder *msm_connector_attached_encoder(
+		struct drm_connector *connector)
+{
+	struct msm_connector *msm_connector = to_msm_connector(connector);
+	return msm_connector->encoder;
+}
diff --git a/drivers/gpu/drm/msm/msm_connector.h b/drivers/gpu/drm/msm/msm_connector.h
new file mode 100644
index 0000000..0b41866
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_connector.h
@@ -0,0 +1,68 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_CONNECTOR_H__
+#define __MSM_CONNECTOR_H__
+
+#include "msm_drv.h"
+
+/*
+ * Base class for MSM connectors.  Typically a connector is fairly
+ * passive.  But with the split between (for example) the DTV block
+ * within MDP4 and the external HDMI encoder, we really need two parts
+ * to an encoder.  So the part external to the display controller block
+ * lives in the connector, and the encoder calls into it to delegate
+ * the appropriate parts of modeset.
+ */
+
+struct msm_connector;
+
+struct msm_connector_funcs {
+	void (*dpms)(struct msm_connector *connector, int mode);
+	void (*mode_set)(struct msm_connector *connector,
+			struct drm_display_mode *mode);
+};
+
+struct msm_connector {
+	struct drm_connector base;
+	struct drm_encoder *encoder;
+	const struct msm_connector_funcs *funcs;
+};
+#define to_msm_connector(x) container_of(x, struct msm_connector, base)
+
+void msm_connector_init(struct msm_connector *connector,
+		const struct msm_connector_funcs *funcs,
+		struct drm_encoder *encoder);
+
+struct drm_encoder *msm_connector_attached_encoder(
+		struct drm_connector *connector);
+
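+/* find the msm_connector (if any) that is attached to the given
+ * encoder:
+ */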
+static inline struct msm_connector *get_connector(struct drm_encoder *encoder)
+{
+	struct msm_drm_private *priv = encoder->dev->dev_private;
+	int i;
+
+	for (i = 0; i < priv->num_connectors; i++) {
+		struct drm_connector *connector = priv->connectors[i];
+		if (msm_connector_attached_encoder(connector) == encoder)
+			return to_msm_connector(connector);
+	}
+
+	return NULL;
+}
+
+#endif /* __MSM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 0000000..e6ccef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,491 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+
+static void msm_fb_output_poll_changed(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	if (priv->fbdev)
+		drm_fb_helper_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+	.fb_create = msm_framebuffer_create,
+	.output_poll_changed = msm_fb_output_poll_changed,
+};
+
+static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+		unsigned long iova, int flags)
+{
+	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+	return 0;
+}
+
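+/* Register an iommu domain with the driver.  The returned id is what
+ * the KMS code passes to msm_gem_get_iova() to get a buffer mapped
+ * for scanout in that domain:
+ */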
+int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	int idx = priv->num_iommus++;
+
+	if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
+		return -EINVAL;
+
+	priv->iommus[idx] = iommu;
+
+	iommu_set_fault_handler(iommu, msm_fault_handler);
+
+	/* need to iommu_attach_device() somewhere??  on resume?? */
+
+	return idx;
+}
+
+#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
+static bool reglog = false;
+MODULE_PARM_DESC(reglog, "Enable register read/write logging");
+module_param(reglog, bool, 0600);
+#else
+#define reglog 0
+#endif
+
+void __iomem *msm_ioremap(struct device *dev, resource_size_t offset,
+		unsigned long size, const char *name)
+{
+	void __iomem *ptr = devm_ioremap_nocache(dev, offset, size);
+	if (reglog)
+		printk(KERN_DEBUG "IO:region %s %08x %08lx\n", name, (u32)ptr, size);
+	return ptr;
+}
+
+void msm_writel(u32 data, void __iomem *addr)
+{
+	if (reglog)
+		printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
+	writel(data, addr);
+}
+
+u32 msm_readl(const void __iomem *addr)
+{
+	u32 val = readl(addr);
+	if (reglog)
+		printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
+	return val;
+}
+
+/*
+ * DRM operations:
+ */
+
+static int msm_unload(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	drm_kms_helper_poll_fini(dev);
+	drm_mode_config_cleanup(dev);
+	drm_vblank_cleanup(dev);
+
+	pm_runtime_get_sync(dev->dev);
+	drm_irq_uninstall(dev);
+	pm_runtime_put_sync(dev->dev);
+
+	flush_workqueue(priv->wq);
+	destroy_workqueue(priv->wq);
+
+	if (kms) {
+		pm_runtime_disable(dev->dev);
+		kms->funcs->destroy(kms);
+	}
+
+	dev->dev_private = NULL;
+
+	pm_runtime_disable(dev->dev);
+
+	kfree(priv);
+
+	return 0;
+}
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+	int ret;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		dev_err(dev->dev, "failed to allocate private data\n");
+		return -ENOMEM;
+	}
+
+	dev->dev_private = priv;
+
+	priv->wq = alloc_ordered_workqueue("msm", 0);
+
+	INIT_LIST_HEAD(&priv->obj_list);
+
+	drm_mode_config_init(dev);
+
+	kms = mdp4_kms_init(dev);
+	if (IS_ERR(kms)) {
+		/*
+		 * NOTE: once we have GPU support, having no kms should not
+		 * be considered fatal.. ideally we would still support gpu
+		 * and (for example) use dmabuf/prime to share buffers with
+		 * imx drm driver on iMX5
+		 */
+		dev_err(dev->dev, "failed to load kms\n");
+		ret = PTR_ERR(kms);
+		goto fail;
+	}
+
+	priv->kms = kms;
+
+	if (kms) {
+		pm_runtime_enable(dev->dev);
+		ret = kms->funcs->hw_init(kms);
+		if (ret) {
+			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+			goto fail;
+		}
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	dev->mode_config.max_width = 2048;
+	dev->mode_config.max_height = 2048;
+	dev->mode_config.funcs = &mode_config_funcs;
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to initialize vblank\n");
+		goto fail;
+	}
+
+	pm_runtime_get_sync(dev->dev);
+	ret = drm_irq_install(dev);
+	pm_runtime_put_sync(dev->dev);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to install IRQ handler\n");
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, dev);
+
+#ifdef CONFIG_DRM_MSM_FBDEV
+	priv->fbdev = msm_fbdev_init(dev);
+#endif
+
+	drm_kms_helper_poll_init(dev);
+
+	return 0;
+
+fail:
+	msm_unload(dev);
+	return ret;
+}
+
+static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	if (kms)
+		kms->funcs->preclose(kms, file);
+}
+
+static void msm_lastclose(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	if (priv->fbdev) {
+		drm_modeset_lock_all(dev);
+		drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+		drm_modeset_unlock_all(dev);
+	}
+}
+
+static irqreturn_t msm_irq(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = arg;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	BUG_ON(!kms);
+	return kms->funcs->irq(kms);
+}
+
+static void msm_irq_preinstall(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	BUG_ON(!kms);
+	kms->funcs->irq_preinstall(kms);
+}
+
+static int msm_irq_postinstall(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	BUG_ON(!kms);
+	return kms->funcs->irq_postinstall(kms);
+}
+
+static void msm_irq_uninstall(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	BUG_ON(!kms);
+	kms->funcs->irq_uninstall(kms);
+}
+
+static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	if (!kms)
+		return -ENXIO;
+	DBG("dev=%p, crtc=%d", dev, crtc_id);
+	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+}
+
+static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	if (!kms)
+		return;
+	DBG("dev=%p, crtc=%d", dev, crtc_id);
+	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int msm_gem_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "All Objects:\n");
+	msm_gem_describe_objects(&priv->obj_list, m);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int msm_mm_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static int msm_fb_show(struct seq_file *m, void *arg)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+	if (priv->fbdev) {
+		seq_printf(m, "fbcon ");
+		fbdev_fb = priv->fbdev->fb;
+		msm_framebuffer_describe(fbdev_fb, m);
+	}
+
+	mutex_lock(&dev->mode_config.fb_lock);
+	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+		if (fb == fbdev_fb)
+			continue;
+
+		seq_printf(m, "user ");
+		msm_framebuffer_describe(fb, m);
+	}
+	mutex_unlock(&dev->mode_config.fb_lock);
+
+	return 0;
+}
+
+static struct drm_info_list msm_debugfs_list[] = {
+		{ "gem", msm_gem_show, 0 },
+		{ "mm",  msm_mm_show,  0 },
+		{ "fb",  msm_fb_show,  0 },
+};
+
+static int msm_debugfs_init(struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	ret = drm_debugfs_create_files(msm_debugfs_list,
+			ARRAY_SIZE(msm_debugfs_list),
+			minor->debugfs_root, minor);
+
+	if (ret) {
+		dev_err(dev->dev, "could not install msm_debugfs_list\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static void msm_debugfs_cleanup(struct drm_minor *minor)
+{
+	drm_debugfs_remove_files(msm_debugfs_list,
+			ARRAY_SIZE(msm_debugfs_list), minor);
+}
+#endif
+
+static const struct vm_operations_struct vm_ops = {
+	.fault = msm_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static const struct file_operations fops = {
+	.owner              = THIS_MODULE,
+	.open               = drm_open,
+	.release            = drm_release,
+	.unlocked_ioctl     = drm_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl       = drm_compat_ioctl,
+#endif
+	.poll               = drm_poll,
+	.read               = drm_read,
+	.fasync             = drm_fasync,
+	.llseek             = no_llseek,
+	.mmap               = msm_gem_mmap,
+};
+
+static struct drm_driver msm_driver = {
+	.driver_features    = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+	.load               = msm_load,
+	.unload             = msm_unload,
+	.preclose           = msm_preclose,
+	.lastclose          = msm_lastclose,
+	.irq_handler        = msm_irq,
+	.irq_preinstall     = msm_irq_preinstall,
+	.irq_postinstall    = msm_irq_postinstall,
+	.irq_uninstall      = msm_irq_uninstall,
+	.get_vblank_counter = drm_vblank_count,
+	.enable_vblank      = msm_enable_vblank,
+	.disable_vblank     = msm_disable_vblank,
+	.gem_free_object    = msm_gem_free_object,
+	.gem_vm_ops         = &vm_ops,
+	.dumb_create        = msm_gem_dumb_create,
+	.dumb_map_offset    = msm_gem_dumb_map_offset,
+	.dumb_destroy       = msm_gem_dumb_destroy,
+#ifdef CONFIG_DEBUG_FS
+	.debugfs_init       = msm_debugfs_init,
+	.debugfs_cleanup    = msm_debugfs_cleanup,
+#endif
+	.fops               = &fops,
+	.name               = "msm",
+	.desc               = "MSM Snapdragon DRM",
+	.date               = "20130625",
+	.major              = 1,
+	.minor              = 0,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_pm_suspend(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	drm_kms_helper_poll_disable(ddev);
+
+	return kms->funcs->pm_suspend(kms);
+}
+
+static int msm_pm_resume(struct device *dev)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct msm_drm_private *priv = ddev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	int ret = 0;
+
+	ret = kms->funcs->pm_resume(kms);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(ddev);
+
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops msm_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+};
+
+/*
+ * Platform driver:
+ */
+
+static int msm_pdev_probe(struct platform_device *pdev)
+{
+	return drm_platform_init(&msm_driver, pdev);
+}
+
+static int msm_pdev_remove(struct platform_device *pdev)
+{
+	drm_platform_exit(&msm_driver, pdev);
+
+	return 0;
+}
+
+static const struct platform_device_id msm_id[] = {
+	{ "mdp", 0 },
+	{ }
+};
+
+static struct platform_driver msm_platform_driver = {
+	.probe      = msm_pdev_probe,
+	.remove     = msm_pdev_remove,
+	.driver     = {
+		.owner  = THIS_MODULE,
+		.name   = "msm",
+		.pm     = &msm_pm_ops,
+	},
+	.id_table   = msm_id,
+};
+
+static int __init msm_drm_init(void)
+{
+	DBG("init");
+	hdmi_init();
+	return platform_driver_register(&msm_platform_driver);
+}
+
+static void __exit msm_drm_fini(void)
+{
+	DBG("fini");
+	platform_driver_unregister(&msm_platform_driver);
+	hdmi_fini();
+}
+
+module_init(msm_drm_init);
+module_exit(msm_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
+MODULE_DESCRIPTION("MSM DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 0000000..85cdcf8
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,161 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_DRV_H__
+#define __MSM_DRV_H__
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+
+struct msm_kms;
+
+#define NUM_DOMAINS 1    /* one for KMS, then one per gpu core (?) */
+
+struct msm_drm_private {
+
+	struct msm_kms *kms;
+
+	struct drm_fb_helper *fbdev;
+
+	/* list of GEM objects: */
+	struct list_head obj_list;
+
+	struct workqueue_struct *wq;
+
+	/* registered IOMMU domains: */
+	unsigned int num_iommus;
+	struct iommu_domain *iommus[NUM_DOMAINS];
+
+	unsigned int num_crtcs;
+	struct drm_crtc *crtcs[8];
+
+	unsigned int num_encoders;
+	struct drm_encoder *encoders[8];
+
+	unsigned int num_connectors;
+	struct drm_connector *connectors[8];
+};
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime.  The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+	/* hw initialization: */
+	int (*hw_init)(struct msm_kms *kms);
+	/* pm: */
+	int (*pm_suspend)(struct msm_kms *kms);
+	int (*pm_resume)(struct msm_kms *kms);
+	/* cancel or wait for pending pageflip, etc: */
+	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+	/* irq handling: */
+	void (*irq_preinstall)(struct msm_kms *kms);
+	int (*irq_postinstall)(struct msm_kms *kms);
+	void (*irq_uninstall)(struct msm_kms *kms);
+	irqreturn_t (*irq)(struct msm_kms *kms);
+	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+	/* cleanup: */
+	void (*destroy)(struct msm_kms *kms);
+};
+
+struct msm_kms {
+	const struct msm_kms_funcs *funcs;
+};
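+
+/* A minimal sketch of how a kms implementation plugs into this interface
+ * (hypothetical "foo" display block, for illustration only -- the real
+ * in-tree example is mdp4_kms.c in this patch):
+ *
+ *	struct foo_kms {
+ *		struct msm_kms base;
+ *		// block specific state: iomem, irq mask, clocks, ...
+ *	};
+ *
+ *	static const struct msm_kms_funcs foo_kms_funcs = {
+ *		.hw_init = foo_hw_init,
+ *		.irq     = foo_irq,
+ *		.destroy = foo_destroy,
+ *		// ... remaining hooks ...
+ *	};
+ *
+ * foo_kms_init() would set base.funcs = &foo_kms_funcs, construct its
+ * planes/crtcs/encoders/connectors, and return &foo_kms->base, which
+ * msm_load() then stores in priv->kms and drives through the vtable.
+ */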
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+
+int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
+
+int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+		struct drm_mode_create_dumb *args);
+int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+		uint32_t handle);
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+		uint32_t handle, uint64_t *offset);
+void *msm_gem_vaddr(struct drm_gem_object *obj);
+void msm_gem_free_object(struct drm_gem_object *obj);
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+		uint32_t size, uint32_t flags, uint32_t *handle);
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags);
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
+
+struct drm_connector *hdmi_connector_init(struct drm_device *dev,
+		struct drm_encoder *encoder);
+void __init hdmi_init(void);
+void __exit hdmi_fini(void);
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+#endif
+
+void __iomem *msm_ioremap(struct device *dev, resource_size_t offset,
+		unsigned long size, const char *name);
+void msm_writel(u32 data, void __iomem *addr);
+u32 msm_readl(const void __iomem *addr);
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+static inline int align_pitch(int width, int bpp)
+{
+	int bytespp = (bpp + 7) / 8;
+	/* adreno needs pitch aligned to 32 pixels: */
+	return bytespp * ALIGN(width, 32);
+}
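+/* For example (illustrative numbers only): a 1366 pixel wide, 32 bpp
+ * surface gives bytespp = 4 and ALIGN(1366, 32) = 1376, so
+ * align_pitch(1366, 32) = 5504 bytes per row.
+ */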
+
+/* for the generated headers: */
+#define INVALID_IDX(idx) ({BUG(); 0;})
+
+#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
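+/* e.g. given hypothetical generated definitions like
+ *	#define REG_FOO_BAR__MASK	0x000000f0
+ *	#define REG_FOO_BAR__SHIFT	4
+ * FIELD(0xab, REG_FOO_BAR) evaluates to ((0xab) & 0xf0) >> 4 == 0xa.
+ */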
+
+/* just put these here until we start adding driver private ioctls:
+ * TODO might shuffle these around.. just need something for now..
+ */
+#define MSM_BO_SCANOUT		0x00000001	/* scanout capable (phys contiguous) */
+#define MSM_BO_WC		0x00000002
+#define MSM_BO_CACHED		0x00000004
+#define MSM_BO_UNCACHED		0x00000008
+
+#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 0000000..8c2653a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,216 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/* per-plane info for the fb: */
+struct plane {
+	struct drm_gem_object *bo;
+	uint32_t pitch;
+	uint32_t offset;
+};
+
+struct msm_framebuffer {
+	struct drm_framebuffer base;
+//XXX	const struct format *format;
+	struct plane planes[2];
+};
+#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
+
+static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
+		struct drm_file *file_priv,
+		unsigned int *handle)
+{
+	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	return drm_gem_handle_create(file_priv,
+			msm_fb->planes[0].bo, handle);
+}
+
+static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	int i, n = drm_format_num_planes(fb->pixel_format);
+
+	DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+	drm_framebuffer_cleanup(fb);
+
+	for (i = 0; i < n; i++) {
+		struct plane *plane = &msm_fb->planes[i];
+		if (plane->bo)
+			drm_gem_object_unreference_unlocked(plane->bo);
+	}
+
+	kfree(msm_fb);
+}
+
+static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
+		struct drm_file *file_priv, unsigned flags, unsigned color,
+		struct drm_clip_rect *clips, unsigned num_clips)
+{
+	return 0;
+}
+
+static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
+	.create_handle = msm_framebuffer_create_handle,
+	.destroy = msm_framebuffer_destroy,
+	.dirty = msm_framebuffer_dirty,
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	int i, n = drm_format_num_planes(fb->pixel_format);
+
+	seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
+			(char *)&fb->pixel_format);
+
+	for (i = 0; i < n; i++) {
+		struct plane *plane = &msm_fb->planes[i];
+		seq_printf(m, "   %d: offset=%d pitch=%d, obj: ",
+				i, plane->offset, plane->pitch);
+		msm_gem_describe(plane->bo, m);
+	}
+}
+#endif
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
+{
+	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+	return msm_fb->planes[plane].bo;
+}
+
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+		struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+	struct drm_gem_object *bos[4] = {0};
+	struct drm_framebuffer *fb;
+	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+
+	for (i = 0; i < n; i++) {
+		bos[i] = drm_gem_object_lookup(dev, file,
+				mode_cmd->handles[i]);
+		if (!bos[i]) {
+			ret = -ENXIO;
+			goto out_unref;
+		}
+	}
+
+	fb = msm_framebuffer_init(dev, mode_cmd, bos);
+	if (IS_ERR(fb)) {
+		ret = PTR_ERR(fb);
+		goto out_unref;
+	}
+
+	return fb;
+
+out_unref:
+	for (i = 0; i < n; i++)
+		drm_gem_object_unreference_unlocked(bos[i]);
+	return ERR_PTR(ret);
+}
+
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+		struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+{
+	struct msm_framebuffer *msm_fb;
+	struct drm_framebuffer *fb = NULL;
+	int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+
+	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+			(char *)&mode_cmd->pixel_format);
+
+/* XXX we need to get info about supported formats from the kms
+ * implementation.. since potentially (probably?) different between
+ * mdp4, mdss, etc..
+	const struct format *format = NULL;
+	for (i = 0; i < ARRAY_SIZE(formats); i++) {
+		if (formats[i].pixel_format == mode_cmd->pixel_format) {
+			format = &formats[i];
+			break;
+		}
+	}
+
+	if (!format) {
+		dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+				(char *)&mode_cmd->pixel_format);
+		ret = -EINVAL;
+		goto fail;
+	}
+ */
+
+	msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
+	if (!msm_fb) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	fb = &msm_fb->base;
+
+//	msm_fb->format = format;
+
+	for (i = 0; i < n; i++) {
+		struct plane *plane = &msm_fb->planes[i];
+
+/*
+		int size, pitch = mode_cmd->pitches[i];
+		if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
+			dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
+					pitch, mode_cmd->width * format->planes[i].stride_bpp);
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		size = pitch * mode_cmd->height / format->planes[i].sub_y;
+
+		if (size > (msm_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
+			dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
+					bos[i]->size - mode_cmd->offsets[i], size);
+			ret = -EINVAL;
+			goto fail;
+		}
+*/
+
+		plane->bo     = bos[i];
+		plane->offset = mode_cmd->offsets[i];
+		plane->pitch  = mode_cmd->pitches[i];
+	}
+
+	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+	ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
+	if (ret) {
+		dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+		goto fail;
+	}
+
+	DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+	return fb;
+
+fail:
+	if (fb)
+		msm_framebuffer_destroy(fb);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 0000000..b7c1390a
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,255 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)
+
+struct msm_fbdev {
+	struct drm_fb_helper base;
+	struct drm_framebuffer *fb;
+	struct drm_gem_object *bo;
+};
+
+static struct fb_ops msm_fb_ops = {
+	.owner = THIS_MODULE,
+
+	/* Note: to properly handle manual update displays, we wrap the
+	 * basic fbdev ops which write to the framebuffer
+	 */
+	.fb_read = fb_sys_read,
+	.fb_write = fb_sys_write,
+	.fb_fillrect = sys_fillrect,
+	.fb_copyarea = sys_copyarea,
+	.fb_imageblit = sys_imageblit,
+
+	.fb_check_var = drm_fb_helper_check_var,
+	.fb_set_par = drm_fb_helper_set_par,
+	.fb_pan_display = drm_fb_helper_pan_display,
+	.fb_blank = drm_fb_helper_blank,
+	.fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int msm_fbdev_create(struct drm_fb_helper *helper,
+		struct drm_fb_helper_surface_size *sizes)
+{
+	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+	struct drm_device *dev = helper->dev;
+	struct drm_framebuffer *fb = NULL;
+	struct fb_info *fbi = NULL;
+	struct drm_mode_fb_cmd2 mode_cmd = {0};
+	uint32_t paddr;
+	int ret, size;
+
+	/* only doing ARGB32 since this is what is needed to alpha-blend
+	 * with video overlays:
+	 */
+	sizes->surface_bpp = 32;
+	sizes->surface_depth = 32;
+
+	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+			sizes->surface_height, sizes->surface_bpp,
+			sizes->fb_width, sizes->fb_height);
+
+	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+			sizes->surface_depth);
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+
+	mode_cmd.pitches[0] = align_pitch(
+			mode_cmd.width, sizes->surface_bpp);
+
+	/* allocate backing bo */
+	size = mode_cmd.pitches[0] * mode_cmd.height;
+	DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+	fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+	if (!fbdev->bo) {
+		dev_err(dev->dev, "failed to allocate buffer object\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
+	if (IS_ERR(fb)) {
+		dev_err(dev->dev, "failed to allocate fb\n");
+		/* note: if fb creation failed, we can't rely on fb destroy
+		 * to unref the bo:
+		 */
+		drm_gem_object_unreference(fbdev->bo);
+		ret = PTR_ERR(fb);
+		goto fail;
+	}
+
+	/* TODO implement our own fb_mmap so we don't need this: */
+	msm_gem_get_iova(fbdev->bo, 0, &paddr);
+
+	mutex_lock(&dev->struct_mutex);
+
+	fbi = framebuffer_alloc(0, dev->dev);
+	if (!fbi) {
+		dev_err(dev->dev, "failed to allocate fb info\n");
+		ret = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	DBG("fbi=%p, dev=%p", fbi, dev);
+
+	fbdev->fb = fb;
+	helper->fb = fb;
+	helper->fbdev = fbi;
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_DEFAULT;
+	fbi->fbops = &msm_fb_ops;
+
+	strcpy(fbi->fix.id, "msm");
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		ret = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+	dev->mode_config.fb_base = paddr;
+
+	fbi->screen_base = msm_gem_vaddr(fbdev->bo);
+	fbi->screen_size = fbdev->bo->size;
+	fbi->fix.smem_start = paddr;
+	fbi->fix.smem_len = fbdev->bo->size;
+
+	DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+	DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+
+fail_unlock:
+	mutex_unlock(&dev->struct_mutex);
+fail:
+
+	if (ret) {
+		if (fbi)
+			framebuffer_release(fbi);
+		if (fb) {
+			drm_framebuffer_unregister_private(fb);
+			drm_framebuffer_remove(fb);
+		}
+	}
+
+	return ret;
+}
+
+static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
+		u16 red, u16 green, u16 blue, int regno)
+{
+	DBG("fbdev: set gamma");
+}
+
+static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
+		u16 *red, u16 *green, u16 *blue, int regno)
+{
+	DBG("fbdev: get gamma");
+}
+
+static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
+	.gamma_set = msm_crtc_fb_gamma_set,
+	.gamma_get = msm_crtc_fb_gamma_get,
+	.fb_probe = msm_fbdev_create,
+};
+
+/* initialize fbdev helper */
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_fbdev *fbdev = NULL;
+	struct drm_fb_helper *helper;
+	int ret = 0;
+
+	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+	if (!fbdev)
+		goto fail;
+
+	helper = &fbdev->base;
+
+	helper->funcs = &msm_fb_helper_funcs;
+
+	ret = drm_fb_helper_init(dev, helper,
+			priv->num_crtcs, priv->num_connectors);
+	if (ret) {
+		dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+		goto fail;
+	}
+
+	drm_fb_helper_single_add_all_connectors(helper);
+
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
+	drm_fb_helper_initial_config(helper, 32);
+
+	priv->fbdev = helper;
+
+	return helper;
+
+fail:
+	kfree(fbdev);
+	return NULL;
+}
+
+void msm_fbdev_free(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_fb_helper *helper = priv->fbdev;
+	struct msm_fbdev *fbdev;
+	struct fb_info *fbi;
+
+	DBG();
+
+	fbi = helper->fbdev;
+
+	/* only cleanup framebuffer if it is present */
+	if (fbi) {
+		unregister_framebuffer(fbi);
+		framebuffer_release(fbi);
+	}
+
+	drm_fb_helper_fini(helper);
+
+	fbdev = to_msm_fbdev(priv->fbdev);
+
+	/* this will free the backing object */
+	if (fbdev->fb) {
+		drm_framebuffer_unregister_private(fbdev->fb);
+		drm_framebuffer_remove(fbdev->fb);
+	}
+
+	kfree(fbdev);
+
+	priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 0000000..a996490
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,441 @@ 
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "msm_drv.h"
+
+struct msm_gem_object {
+	struct drm_gem_object base;
+
+	struct list_head mm_list;
+
+	uint32_t flags;
+	struct page **pages;
+	struct sg_table *sgt;
+	void *vaddr;
+
+	struct {
+		// XXX
+		uint32_t iova;
+	} domain[NUM_DOMAINS];
+};
+#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+
+/* called with dev->struct_mutex held */
+/* TODO move this into drm_gem.c */
+static struct page **attach_pages(struct drm_gem_object *obj)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = file_inode(obj->filp);
+	mapping = inode->i_mapping;
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+	}
+
+	return pages;
+
+fail:
+	while (i--)
+		page_cache_release(pages[i]);
+
+	drm_free_large(pages);
+	return ERR_CAST(p);
+}
+
+static void detach_pages(struct drm_gem_object *obj, struct page **pages)
+{
+	int i, npages;
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		set_page_dirty(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
+}
+
+/* called with dev->struct_mutex held */
+static struct page **get_pages(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (!msm_obj->pages) {
+		struct page **p = attach_pages(obj);
+		int npages = obj->size >> PAGE_SHIFT;
+
+		if (IS_ERR(p)) {
+			dev_err(obj->dev->dev, "could not get pages: %ld\n",
+					PTR_ERR(p));
+			return p;
+		}
+		msm_obj->pages = p;
+		msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+	}
+
+	return msm_obj->pages;
+}
+
+static void put_pages(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (msm_obj->pages) {
+		if (msm_obj->sgt) {
+			sg_free_table(msm_obj->sgt);
+			kfree(msm_obj->sgt);
+		}
+		detach_pages(obj, msm_obj->pages);
+		msm_obj->pages = NULL;
+	}
+}
+
+int msm_gem_mmap_obj(struct drm_gem_object *obj,
+		struct vm_area_struct *vma)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	if (msm_obj->flags & MSM_BO_WC) {
+		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
+		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+	} else {
+		/*
+		 * Shunt off cached objs to shmem file so they have their own
+		 * address_space (so unmap_mapping_range does what we want,
+		 * in particular in the case of mmap'd dmabufs)
+		 */
+		fput(vma->vm_file);
+		get_file(obj->filp);
+		vma->vm_pgoff = 0;
+		vma->vm_file  = obj->filp;
+
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+	}
+
+	return 0;
+}
+
+int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	ret = drm_gem_mmap(filp, vma);
+	if (ret) {
+		DBG("mmap failed: %d", ret);
+		return ret;
+	}
+
+	return msm_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct drm_device *dev = obj->dev;
+	struct page **pages;
+	unsigned long pfn;
+	pgoff_t pgoff;
+	int ret;
+
+	/* Make sure we don't parallel update on a fault, nor move or remove
+	 * something from beneath our feet
+	 */
+	mutex_lock(&dev->struct_mutex);
+
+	/* make sure we have pages attached now */
+	pages = get_pages(obj);
+	if (IS_ERR(pages)) {
+		ret = PTR_ERR(pages);
+		goto out;
+	}
+
+	/* We don't use vmf->pgoff since that has the fake offset: */
+	pgoff = ((unsigned long)vmf->virtual_address -
+			vma->vm_start) >> PAGE_SHIFT;
+
+	pfn = page_to_pfn(msm_obj->pages[pgoff]);
+
+	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+			pfn, pfn << PAGE_SHIFT);
+
+	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+
+out:
+	mutex_unlock(&dev->struct_mutex);
+	switch (ret) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		return VM_FAULT_NOPAGE;
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (!obj->map_list.map) {
+		/* Make it mmapable */
+		int ret = drm_gem_create_mmap_offset(obj);
+
+		if (ret) {
+			dev_err(dev->dev, "could not allocate mmap offset\n");
+			return 0;
+		}
+	}
+
+	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
+{
+	uint64_t offset;
+	mutex_lock(&obj->dev->struct_mutex);
+	offset = mmap_offset(obj);
+	mutex_unlock(&obj->dev->struct_mutex);
+	return offset;
+}
+
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int ret = 0;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	if (!msm_obj->domain[id].iova) {
+		struct msm_drm_private *priv = obj->dev->dev_private;
+		uint32_t offset = (uint32_t)mmap_offset(obj);
+		struct page **pages = get_pages(obj);
+		if (IS_ERR(pages)) {
+			ret = PTR_ERR(pages);
+		} else {
+			ret = iommu_map_range(priv->iommus[id], offset,
+					msm_obj->sgt->sgl, obj->size, IOMMU_READ);
+			if (!ret)
+				msm_obj->domain[id].iova = offset;
+		}
+	}
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	if (!ret)
+		*iova = msm_obj->domain[id].iova;
+
+	return ret;
+}
+
+void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+{
+}
+
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+		struct drm_mode_create_dumb *args)
+{
+	args->pitch = align_pitch(args->width, args->bpp);
+	args->size  = PAGE_ALIGN(args->pitch * args->height);
+	return msm_gem_new_handle(dev, file, args->size,
+			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+}
+
+int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+		uint32_t handle)
+{
+	/* No special work needed, drop the reference and see what falls out */
+	return drm_gem_handle_delete(file, handle);
+}
+
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+		uint32_t handle, uint64_t *offset)
+{
+	struct drm_gem_object *obj;
+	int ret = 0;
+
+	/* GEM does all our handle to object mapping */
+	obj = drm_gem_object_lookup(dev, file, handle);
+	if (obj == NULL) {
+		ret = -ENOENT;
+		goto fail;
+	}
+
+	*offset = msm_gem_mmap_offset(obj);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+fail:
+	return ret;
+}
+
+void *msm_gem_vaddr(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+	if (!msm_obj->vaddr) {
+		struct page **pages = get_pages(obj);
+		if (IS_ERR(pages))
+			return ERR_CAST(pages);
+		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+	}
+	return msm_obj->vaddr;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	uint64_t off = 0;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (obj->map_list.map)
+		off = (uint64_t)obj->map_list.hash.key;
+
+	seq_printf(m, "%08x: %2d (%2d) %08llx %p %zu\n",
+			msm_obj->flags, obj->name, obj->refcount.refcount.counter,
+			off, msm_obj->vaddr, obj->size);
+}
+
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+	struct msm_gem_object *msm_obj;
+	int count = 0;
+	size_t size = 0;
+
+	list_for_each_entry(msm_obj, list, mm_list) {
+		struct drm_gem_object *obj = &msm_obj->base;
+		seq_printf(m, "   ");
+		msm_gem_describe(obj, m);
+		count++;
+		size += obj->size;
+	}
+
+	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+void msm_gem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	int id;
+
+	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	list_del(&msm_obj->mm_list);
+
+	if (obj->map_list.map)
+		drm_gem_free_mmap_offset(obj);
+
+	if (msm_obj->vaddr)
+		vunmap(msm_obj->vaddr);
+
+	for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+		if (msm_obj->domain[id].iova) {
+			struct msm_drm_private *priv = obj->dev->dev_private;
+			uint32_t offset = (uint32_t)mmap_offset(obj);
+			iommu_unmap_range(priv->iommus[id], offset, obj->size);
+		}
+	}
+
+	put_pages(obj);
+
+	drm_gem_object_release(obj);
+
+	kfree(msm_obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+		uint32_t size, uint32_t flags, uint32_t *handle)
+{
+	struct drm_gem_object *obj;
+	int ret;
+
+	obj = msm_gem_new(dev, size, flags);
+	if (!obj)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file, obj, handle);
+
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+		uint32_t size, uint32_t flags)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gem_object *msm_obj;
+	struct drm_gem_object *obj = NULL;
+	int ret;
+
+	size = PAGE_ALIGN(size);
+
+	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+	if (!msm_obj)
+		goto fail;
+
+	obj = &msm_obj->base;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret)
+		goto fail;
+
+	msm_obj->flags = flags;
+
+	mutex_lock(&obj->dev->struct_mutex);
+	list_add(&msm_obj->mm_list, &priv->obj_list);
+	mutex_unlock(&obj->dev->struct_mutex);
+
+	return obj;
+
+fail:
+	if (obj)
+		drm_gem_object_unreference_unlocked(obj);
+
+	return NULL;
+}