@@ -259,9 +259,6 @@ static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
 	if (ret)
 		return ret;
 
-	/* Keep the WC mmaping set by drm_gem_mmap() but our pages
-	 * are ordinary and not special.
-	 */
 	vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
 	return 0;
 }
@@ -310,17 +307,17 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
 static int vgem_prime_pin(struct drm_gem_object *obj, struct device *dev)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-	long n_pages = obj->size >> PAGE_SHIFT;
+	long i, n_pages = obj->size >> PAGE_SHIFT;
 	struct page **pages;
 
 	pages = vgem_pin_pages(bo);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
-	/* Flush the object from the CPU cache so that importers can rely
-	 * on coherent indirect access via the exported dma-address.
-	 */
-	drm_clflush_pages(pages, n_pages);
+	for (i = 0; i < n_pages; i++) {
+		dma_sync_single_for_device(dev, page_to_phys(pages[i]),
+					   PAGE_SIZE, DMA_BIDIRECTIONAL);
+	}
 
 	return 0;
 }
@@ -328,6 +325,13 @@ static int vgem_prime_pin(struct drm_gem_object *obj, struct device *dev)
 static void vgem_prime_unpin(struct drm_gem_object *obj, struct device *dev)
 {
 	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
+	long i, n_pages = obj->size >> PAGE_SHIFT;
+	struct page **pages = bo->pages;
+
+	for (i = 0; i < n_pages; i++) {
+		dma_sync_single_for_cpu(dev, page_to_phys(pages[i]),
+					PAGE_SIZE, DMA_BIDIRECTIONAL);
+	}
 
 	vgem_unpin_pages(bo);
 }
@@ -382,7 +386,7 @@ static void *vgem_prime_vmap(struct drm_gem_object *obj)
 	if (IS_ERR(pages))
 		return NULL;
 
-	return vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
+	return vmap(pages, n_pages, 0, PAGE_KERNEL);
 }
 
 static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -411,7 +415,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
 	fput(vma->vm_file);
 	vma->vm_file = get_file(obj->filp);
 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 
 	return 0;
 }