@@ -479,17 +479,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 	int ret;
 	u32 val;
 
-	if (addr >= high_memory) {
-		struct page *p1;
-
-		if (((size_t)addr & PAGE_MASK) !=
-			((size_t)(addr + len - 1) & PAGE_MASK))
-			goto out_copy;
-		p1 = vmalloc_to_page(addr);
-		if (!p1)
-			goto out_copy;
-		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
-	}
+	if (addr >= high_memory)
+		goto out_copy;
 
 	sg_init_one(&sg, addr, len);
 	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
@@ -546,6 +537,7 @@ out_copy:
 	else
 		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
 			: omap_write_buf8(mtd, (u_char *) addr, len);
+
 	return 0;
 }
Based on DMA documentation and testing, using high memory buffers for DMA
transfers can lead to various issues, including kernel panics. To work
around this, simply fall back to a CPU copy. High memory buffers are used
very rarely here, so no noticeable performance hit should be seen.

Signed-off-by: Franklin S Cooper Jr <fcooper@ti.com>
---
V2 Changes:
None

 drivers/mtd/nand/omap2.c | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)
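For context, here is a minimal, simplified sketch (not the driver code itself)
of the pattern this change settles on: if the buffer sits at or above
high_memory, skip DMA entirely and fall back to a PIO/CPU copy. The wrapper
name xfer_buf() is hypothetical, the DMA descriptor submission is elided, and
the PIO fallback is only indicated by a comment; in the real driver it is done
by omap_read_buf8()/omap_write_buf8() or their 16-bit variants.

#include <linux/mm.h>		/* high_memory */
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

/* Illustrative sketch only; error handling and DMA submission are elided. */
static int xfer_buf(struct device *dev, void *addr, unsigned int len,
		    int is_write)
{
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	struct scatterlist sg;

	/*
	 * Buffers at or above high_memory (e.g. vmalloc'ed) may not map to a
	 * single physically contiguous region, so handing them to
	 * sg_init_one()/dma_map_sg() is unsafe.  Bail out to a CPU copy.
	 */
	if (addr >= high_memory)
		goto out_copy;

	sg_init_one(&sg, addr, len);
	if (dma_map_sg(dev, &sg, 1, dir) == 0)
		goto out_copy;

	/* ... prepare, submit and wait for the DMA transfer here ... */

	dma_unmap_sg(dev, &sg, 1, dir);
	return 0;

out_copy:
	/*
	 * PIO fallback: let the CPU move the data, as the driver does with
	 * omap_read_buf8()/omap_write_buf8() (or the 16-bit variants).
	 */
	return 0;
}

The essential difference from the old code is that nothing tries to translate
the address with vmalloc_to_page() any more (which only worked for buffers
contained within a single page); every buffer above high_memory now takes the
out_copy path.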