@@ -48,7 +48,7 @@ static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 	/* For non-cached buffers, ensure the new pages are clean
 	 * because display controller, GPU, etc. are not coherent:
 	 */
-	if (etnaviv_obj->flags & (ETNA_BO_WC|ETNA_BO_UNCACHED))
+	if (etnaviv_obj->flags & (ETNA_BO_WC|ETNA_BO_CACHED))
 		dma_map_sg(dev->dev, etnaviv_obj->sgt->sgl,
 			   etnaviv_obj->sgt->nents, DMA_BIDIRECTIONAL);
 
@@ -60,10 +60,22 @@ static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
 	if (etnaviv_obj->pages) {
 		struct drm_device *dev = etnaviv_obj->base.dev;
 
-		/* For non-cached buffers, ensure the new pages are clean
+		/*
+		 * For non-cached buffers, ensure the new pages are clean
 		 * because display controller, GPU, etc. are not coherent:
+		 *
+		 * WARNING: The DMA API does not support concurrent CPU
+		 * and device access to the memory area. With BIDIRECTIONAL,
+		 * we will clean the cache lines which overlap the region,
+		 * and invalidate all cache lines (partially) contained in
+		 * the region.
+		 *
+		 * If you have dirty data in the overlapping cache lines,
+		 * that will corrupt the GPU-written data. If you have
+		 * written into the remainder of the region, this can
+		 * discard those writes.
 		 */
-		if (etnaviv_obj->flags & (ETNA_BO_WC|ETNA_BO_UNCACHED))
+		if (etnaviv_obj->flags & (ETNA_BO_WC|ETNA_BO_CACHED))
 			dma_unmap_sg(dev->dev, etnaviv_obj->sgt->sgl,
 				     etnaviv_obj->sgt->nents,
 				     DMA_BIDIRECTIONAL);
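
For context, the hand-off the WARNING describes looks roughly like this when
written out against the generic streaming DMA API. This is only a sketch:
my_dev, sgt and stream_to_device() are placeholder names, the actual GPU
submission is elided, and none of it is etnaviv code.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch only: stream a scatterlist to a non-coherent device and back. */
static int stream_to_device(struct device *my_dev, struct sg_table *sgt)
{
	int nents;

	/*
	 * Hand the buffer to the device.  For DMA_BIDIRECTIONAL this is
	 * where dirty CPU cache lines covering the buffer are cleaned
	 * (written back) so the device sees current data.
	 */
	nents = dma_map_sg(my_dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	if (!nents)
		return -ENOMEM;

	/*
	 * ... submit the device work here ...
	 *
	 * Between map and unmap the device owns the memory: any CPU read
	 * or write of the buffer is exactly the concurrent access the
	 * WARNING above forbids.
	 */

	/*
	 * Take the buffer back.  This is where CPU cache lines covering
	 * the region are invalidated, so CPU reads of device-written data
	 * are only valid after this call.
	 */
	dma_unmap_sg(my_dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);

	return 0;
}

When a mapping is meant to stay alive across many CPU/device hand-offs, as
with a long-lived buffer object, the dma_sync_sg_for_cpu()/
dma_sync_sg_for_device() pair performs the same ownership transfer without
tearing the mapping down.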