@@ -351,6 +351,7 @@ static int __init omap1_system_dma_init(void)
p->dma_write = dma_write;
p->dma_read = dma_read;
p->disable_irq_lch = NULL;
+ p->midlemode = NULL;
p->errata = configure_dma_errata();
@@ -36,7 +36,9 @@
static u32 errata;
static u8 dma_stride;
+static u32 midlemode_save_cnt;
+static struct platform_device *pdev;
static struct omap_dma_dev_attr *d;
static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
@@ -117,6 +119,18 @@ static inline u32 dma_read(int reg, int lch)
return val;
}
+static void midlemode_nostandby(bool nostandby)
+{
+ /* TODO: midlemode_save_cnt can be moved to hwmod layer? */
+ if (nostandby) {
+ omap_device_require_no_mstandby(pdev);
+ midlemode_save_cnt += 1;
+ } else {
+ omap_device_release_no_mstandby(pdev);
+ midlemode_save_cnt -= 1;
+ }
+}
+
static inline void omap2_disable_irq_lch(int lch)
{
u32 val;
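The midlemode_nostandby() helper added above forwards every request straight to the require/release calls, while midlemode_save_cnt only records how many requests have been made. Since both omap_stop_dma() and omap_stop_dma_chain_transfers() invoke the hook under dma_chan_lock, a hedged sketch of how that counter could actually gate the calls (so overlapping users on different channels do not drop no-standby prematurely) might look like the variant below; this is a possible refinement, not part of the patch:

	static void midlemode_nostandby_counted(bool nostandby)
	{
		/* callers hold dma_chan_lock, so the counter needs no extra locking */
		if (nostandby) {
			/* first user forces MSTANDBY off; nested users only bump the count */
			if (midlemode_save_cnt++ == 0)
				omap_device_require_no_mstandby(pdev);
		} else {
			/* last user restores the default standby behaviour */
			if (midlemode_save_cnt && --midlemode_save_cnt == 0)
				omap_device_release_no_mstandby(pdev);
		}
	}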
@@ -253,6 +267,7 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
p->clear_dma = omap2_clear_dma;
p->dma_write = dma_write;
p->dma_read = dma_read;
+ p->midlemode = midlemode_nostandby;
p->clear_lch_regs = NULL;
@@ -286,6 +301,7 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
dev_err(&od->pdev.dev, "%s: kzalloc fail\n", __func__);
return -ENOMEM;
}
+ pdev = &od->pdev;
return 0;
}
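omap2_system_dma_init_dev() above caches &od->pdev so the hook can hand the DMA omap_device to the new helpers. omap_device_require_no_mstandby() and omap_device_release_no_mstandby() themselves are not part of this excerpt; as a rough sketch only, assuming a hypothetical omap_hwmod_set_master_standbymode() accessor modelled on the existing omap_hwmod_set_slave_idlemode() (the HWMOD_IDLEMODE_* constants and the omap_device fields are real), they could take a shape like:

	/* sketch only: omap_hwmod_set_master_standbymode() is hypothetical */
	static int omap_device_set_mstandby(struct platform_device *pdev, u8 idlemode)
	{
		struct omap_device *od = to_omap_device(pdev);
		int i;

		/* update MSTANDBY on every hwmod backing this omap_device */
		for (i = 0; i < od->hwmods_cnt; i++)
			omap_hwmod_set_master_standbymode(od->hwmods[i], idlemode);

		return 0;
	}

	int omap_device_require_no_mstandby(struct platform_device *pdev)
	{
		/* force no-standby while a DMA errata workaround is in progress */
		return omap_device_set_mstandby(pdev, HWMOD_IDLEMODE_NO);
	}

	int omap_device_release_no_mstandby(struct platform_device *pdev)
	{
		/*
		 * drop back to smart-standby; a real implementation would restore
		 * the mode recorded in the hwmod data rather than assume smart
		 */
		return omap_device_set_mstandby(pdev, HWMOD_IDLEMODE_SMART);
	}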
@@ -38,8 +38,9 @@
#include <asm/system.h>
#include <mach/hardware.h>
-#include <plat/dma.h>
+#include <plat/dma.h>
+#include <plat/omap_device.h>
#include <plat/tc.h>
#undef DEBUG
@@ -924,6 +925,7 @@ EXPORT_SYMBOL(omap_start_dma);
void omap_stop_dma(int lch)
{
u32 l;
+ unsigned long flags;
/* Disable all interrupts on the channel */
if (cpu_class_is_omap1())
@@ -933,14 +935,13 @@ void omap_stop_dma(int lch)
if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
(l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
int i = 0;
- u32 sys_cf;
/* Configure No-Standby */
- l = p->dma_read(OCP_SYSCONFIG, lch);
- sys_cf = l;
- l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
- l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
- p->dma_write(l , OCP_SYSCONFIG, 0);
+ if (p->midlemode) {
+ spin_lock_irqsave(&dma_chan_lock, flags);
+ p->midlemode(true);
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
+ }
l = p->dma_read(CCR, lch);
l &= ~OMAP_DMA_CCR_EN;
@@ -958,7 +959,11 @@ void omap_stop_dma(int lch)
printk(KERN_ERR "DMA drain did not complete on "
"lch %d\n", lch);
/* Restore OCP_SYSCONFIG */
- p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
+ if (p->midlemode) {
+ spin_lock_irqsave(&dma_chan_lock, flags);
+ p->midlemode(false);
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
+ }
} else {
l &= ~OMAP_DMA_CCR_EN;
p->dma_write(l, CCR, lch);
@@ -1610,7 +1615,7 @@ int omap_stop_dma_chain_transfers(int chain_id)
{
int *channels;
u32 l, i;
- u32 sys_cf = 0;
+ unsigned long flags;
/* Check for input params */
if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
@@ -1625,12 +1630,10 @@ int omap_stop_dma_chain_transfers(int chain_id)
}
channels = dma_linked_lch[chain_id].linked_dmach_q;
- if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
- sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
- l = sys_cf;
- /* Middle mode reg set no Standby */
- l &= ~((1 << 12)|(1 << 13));
- p->dma_write(l, OCP_SYSCONFIG, 0);
+ if (IS_DMA_ERRATA(DMA_ERRATA_i88) && p->midlemode) {
+ spin_lock_irqsave(&dma_chan_lock, flags);
+ p->midlemode(true);
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
}
for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
@@ -1650,8 +1653,11 @@ int omap_stop_dma_chain_transfers(int chain_id)
/* Reset the Queue pointers */
OMAP_DMA_CHAIN_QINIT(chain_id);
- if (IS_DMA_ERRATA(DMA_ERRATA_i88))
- p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
+ if (IS_DMA_ERRATA(DMA_ERRATA_i88) && p->midlemode) {
+ spin_lock_irqsave(&dma_chan_lock, flags);
+ p->midlemode(false);
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
+ }
return 0;
}
@@ -435,6 +435,7 @@ struct omap_system_dma_plat_info {
void (*clear_dma)(int lch);
void (*dma_write)(u32 val, int reg, int lch);
u32 (*dma_read)(int reg, int lch);
+ void (*midlemode)(bool nostandby);
};
extern void omap_set_dma_priority(int lch, int dst_port, int priority);