@@ -65,17 +65,8 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
memcpy(&davinci_soc_info, soc_info, sizeof(struct davinci_soc_info));
- if (davinci_soc_info.io_desc && (davinci_soc_info.io_desc_num > 0))
- iotable_init(davinci_soc_info.io_desc,
- davinci_soc_info.io_desc_num);
-
- /*
- * Normally devicemaps_init() would flush caches and tlb after
- * mdesc->map_io(), but we must also do it here because of the CPU
- * revision check below.
- */
- local_flush_tlb_all();
- flush_cache_all();
+ davinci_io_init(davinci_soc_info.io_desc,
+ davinci_soc_info.io_desc_num);
/*
* We want to check CPU revision early for cpu_is_xxxx() macros.
@@ -1210,6 +1210,8 @@ static struct davinci_soc_info davinci_soc_info_da830 = {
void __init da830_init(void)
{
+ davinci_io_init(da830_io_desc, ARRAY_SIZE(da830_io_desc));
+
da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"))
return;
@@ -1099,6 +1099,8 @@ void __init da850_init(void)
{
unsigned int v;
+ davinci_io_init(da850_io_desc, ARRAY_SIZE(da850_io_desc));
+
da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"))
return;
@@ -25,8 +25,13 @@
#define __arch_ioremap(p, s, t) davinci_ioremap(p, s, t)
#define __arch_iounmap(v) davinci_iounmap(v)
+struct map_desc;
+
void __iomem *davinci_ioremap(unsigned long phys, size_t size,
unsigned int type);
void davinci_iounmap(volatile void __iomem *addr);
+
+void davinci_io_init(struct map_desc *desc, unsigned long desc_num);
+
#endif
#endif /* __ASM_ARCH_IO_H */
@@ -12,19 +12,31 @@
#include <linux/io.h>
#include <asm/tlb.h>
+#include <asm/mach/map.h>
-#define BETWEEN(p, st, sz) ((p) >= (st) && (p) < ((st) + (sz)))
-#define XLATE(p, pst, vst) ((void __iomem *)((p) - (pst) + (vst)))
+#include <mach/common.h>
+
+static struct map_desc *davinci_io_desc;
+static unsigned long davinci_io_desc_num;
/*
* Intercept ioremap() requests for addresses in our fixed mapping regions.
*/
void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
{
- if (BETWEEN(p, IO_PHYS, IO_SIZE))
- return XLATE(p, IO_PHYS, IO_VIRT);
+ struct map_desc *desc = davinci_io_desc;
+ int i;
+
+ for (i = 0; i < davinci_io_desc_num; i++, desc++) {
+ unsigned long iophys = __pfn_to_phys(desc->pfn);
+ unsigned long iosize = desc->length;
+
+ if (p >= iophys && (p + size) <= (iophys + iosize))
+ return __io(desc->virtual + p - iophys);
+ }
- return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
+ return __arm_ioremap_caller(p, size, type,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(davinci_ioremap);
@@ -36,3 +48,30 @@ void davinci_iounmap(volatile void __iomem *addr)
__iounmap(addr);
}
EXPORT_SYMBOL(davinci_iounmap);
+
+void __init davinci_io_init(struct map_desc *desc, unsigned long desc_num)
+{
+ if (!desc || !desc_num)
+ return;
+
+ /* silently refuse to init twice ... */
+ if (davinci_io_desc) {
+ /* ... unless things change */
+ BUG_ON(davinci_io_desc != desc ||
+ davinci_io_desc_num != desc_num);
+ return;
+ }
+
+ davinci_io_desc = desc;
+ davinci_io_desc_num = desc_num;
+
+ iotable_init(desc, desc_num);
+
+ /*
+ * Normally devicemaps_init() would flush caches and tlb after
+ * mdesc->map_io(), but we must also do it here because callers
+ * use these mappings right away (e.g. for the CPU revision
+ * check in davinci_common_init()) before that happens.
+ */
+ local_flush_tlb_all();
+ flush_cache_all();
+}