--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -257,54 +257,6 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- return kmap_atomic(page);
- else
- return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- kunmap_atomic(addr);
- else
- __ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
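The wrapper pair above existed only because kmap_atomic_prot() used to be x86-only; the premise of this removal is that it is now available on every architecture, and that calling it with PAGE_KERNEL behaves exactly like plain kmap_atomic(). Both branches of the removed helper therefore collapse into one direct call. A minimal sketch of the resulting pattern (the helper name is mine, not the kernel's):

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative only: map with an explicit protection, unmap with plain
 * kunmap_atomic().  The protection is no longer needed on the unmap
 * side, which is the visible simplification throughout this patch.
 */
static void fill_page_with_prot(struct page *page, int val, pgprot_t prot)
{
	void *va = kmap_atomic_prot(page, prot);

	memset(va, val, PAGE_SIZE);
	kunmap_atomic(va);
}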
@@ -316,13 +268,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = ttm_kmap_atomic_prot(d, prot);
+ dst = kmap_atomic_prot(d, prot);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(dst, prot);
+ kunmap_atomic(dst);
return 0;
}
@@ -338,13 +290,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = ttm_kmap_atomic_prot(s, prot);
+ src = kmap_atomic_prot(s, prot);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(src, prot);
+ kunmap_atomic(src);
return 0;
}
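These two helpers are mirror images: only the struct page side of the copy needs the explicit protection, while the __iomem side was already mapped by ioremap and is accessed through memcpy_fromio()/memcpy_toio(). A hedged sketch of the read direction, with an invented helper name:

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/io.h>

/*
 * Illustrative only: read one page out of an ioremapped aperture into
 * a system-memory page mapped with @prot, mirroring
 * ttm_copy_io_ttm_page() above.
 */
static int read_io_page(struct page *dst_page, const void __iomem *src,
			unsigned long page, pgprot_t prot)
{
	void *dst = kmap_atomic_prot(dst_page, prot);

	if (!dst)
		return -ENOMEM;
	memcpy_fromio(dst, src + (page << PAGE_SHIFT), PAGE_SIZE);
	kunmap_atomic(dst);
	return 0;
}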
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -374,12 +374,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
if (unmap_src) {
- ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+ kunmap_atomic(d->src_addr);
d->src_addr = NULL;
}
if (unmap_dst) {
- ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+ kunmap_atomic(d->dst_addr);
d->dst_addr = NULL;
}
@@ -388,8 +388,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->dst_addr =
- ttm_kmap_atomic_prot(d->dst_pages[dst_page],
- d->dst_prot);
+ kmap_atomic_prot(d->dst_pages[dst_page],
+ d->dst_prot);
if (!d->dst_addr)
return -ENOMEM;
@@ -401,8 +401,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->src_addr =
- ttm_kmap_atomic_prot(d->src_pages[src_page],
- d->src_prot);
+ kmap_atomic_prot(d->src_pages[src_page],
+ d->src_prot);
if (!d->src_addr)
return -ENOMEM;
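The blit line loop keeps at most one source and one destination page mapped at a time; when the copy crosses a page boundary it unmaps the old page before mapping the next. After this patch the stored src_prot/dst_prot values are consumed only on the map side. A sketch of that remap step, using a pared-down stand-in for the driver-local struct rather than the real vmw_bo_blit_line_data:

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/types.h>

/* Pared-down stand-in for one side of the blit (hypothetical type). */
struct blit_side {
	void *addr;
	struct page **pages;
	pgprot_t prot;
};

/*
 * Illustrative remap-on-page-cross step: release the old atomic
 * mapping (no protection argument needed any more), then map the new
 * page with the stored protection.
 */
static int switch_page(struct blit_side *s, u32 page)
{
	if (s->addr) {
		kunmap_atomic(s->addr);
		s->addr = NULL;
	}
	s->addr = kmap_atomic_prot(s->pages[page], s->prot);
	return s->addr ? 0 : -ENOMEM;
}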
@@ -499,9 +499,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
}
out:
if (d.src_addr)
- ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+ kunmap_atomic(d.src_addr);
if (d.dst_addr)
- ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+ kunmap_atomic(d.dst_addr);
return ret;
}
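One caution from the removed documentation still applies to the raw API: atomic mappings disable page faults, so nothing may sleep while one is held, and nested mappings must be released in reverse order of creation, exactly as the cleanup path above does. Illustrative sketch:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative only: two nested atomic mappings, released in reverse
 * order as the removed ttm_kmap_atomic_prot() comment required.
 */
static void copy_page_atomic(struct page *to, struct page *from,
			     pgprot_t to_prot, pgprot_t from_prot)
{
	void *dst = kmap_atomic_prot(to, to_prot);
	void *src = kmap_atomic_prot(from, from_prot);

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src);	/* last mapped, first unmapped */
	kunmap_atomic(dst);
}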
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -668,10 +668,6 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev);
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
-
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
-
/**
* ttm_bo_io
*