@@ -102,6 +102,22 @@ static void drm_clflush_page(struct page *page)
}
 #endif
 
+#if defined(CONFIG_ARM64)
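+/*
+ * arm64 has no highmem, so kmap_atomic() is just the page's linear-map
+ * address; __dma_flush_range() then cleans and invalidates the data
+ * cache for that range by virtual address.
+ */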
+static void drm_clflush_page(struct page *page)
+{
+	void *virt;
+
+	virt = kmap_atomic(page);
+	__dma_flush_range(virt, virt + PAGE_SIZE);
+	kunmap_atomic(virt);
+}
+#endif
+
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
@@ -129,7 +145,8 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 				   (unsigned long)page_virtual + PAGE_SIZE);
 		kunmap_atomic(page_virtual);
 	}
-#elif defined(CONFIG_ARM)
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
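+	/* arm64 can reuse the ARM path: flush each page individually. */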
 	unsigned long i;
 
 	for (i = 0; i < num_pages; i++)
@@ -158,7 +175,8 @@ drm_clflush_sg(struct sg_table *st)
 
 	if (wbinvd_on_all_cpus())
 		printk(KERN_ERR "Timed out waiting for cache flush.\n");
-#elif defined(CONFIG_ARM)
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
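+	/* Likewise for scatterlists: flush each page in the table. */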
 	struct sg_page_iter sg_iter;
 
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)