Message ID | 1516111004-10247-3-git-send-email-thellstrom@vmware.com (mailing list archive)
---|---
State | New, archived
On 16.01.2018 at 14:56, Thomas Hellstrom wrote:
> It will be used by vmwgfx cpu blit.
>
> Cc: Christian König <christian.koenig@amd.com>
> Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
> Reviewed-by: Brian Paul <brianp@vmware.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>  drivers/gpu/drm/ttm/ttm_bo_util.c | 31 ++++++++++++++++++++++++++-----
>  include/drm/ttm/ttm_bo_api.h      |  4 ++++
>  2 files changed, 30 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6d6d939..9d4c7f8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -263,24 +263,45 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
 #define __ttm_kunmap_atomic(__addr) vunmap(__addr)
 #endif
 
-static void *ttm_kmap_atomic_prot(struct page *page,
-				  pgprot_t prot)
+
+/**
+ * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
+ * specified page protection.
+ *
+ * @page: The page to map.
+ * @prot: The page protection.
+ *
+ * This function maps a TTM page using the kmap_atomic api if available,
+ * otherwise falls back to vmap. The user must make sure that the
+ * specified page does not have an aliased mapping with a different caching
+ * policy unless the architecture explicitly allows it. Also mapping and
+ * unmapping using this api must be correctly nested. Unmapping should
+ * occur in the reverse order of mapping.
+ */
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 		return kmap_atomic(page);
 	else
 		return __ttm_kmap_atomic_prot(page, prot);
 }
+EXPORT_SYMBOL(ttm_kmap_atomic_prot);
 
-
-static void ttm_kunmap_atomic_prot(void *addr,
-				   pgprot_t prot)
+/**
+ * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
+ * ttm_kmap_atomic_prot.
+ *
+ * @addr: The virtual address from the map.
+ * @prot: The page protection.
+ */
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
 {
 	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
 		kunmap_atomic(addr);
 	else
 		__ttm_kunmap_atomic(addr);
 }
+EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
 
 static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index f1c74c2..936e5d5 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -728,6 +728,10 @@ unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
 int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
 		struct ttm_bo_device *bdev);
 
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
+
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
+
 /**
  * ttm_bo_io
  *
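For readers who want to see the newly exported pair in use, here is a minimal sketch of a driver-side page copy, the kind of CPU blit the commit message mentions. The helper example_blit_page() and its parameters are hypothetical and not part of the patch; only ttm_kmap_atomic_prot() and ttm_kunmap_atomic_prot() come from this series. It follows the nesting rule from the kernel-doc above: the page mapped last is unmapped first.

#include <linux/highmem.h>
#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>

/*
 * Hypothetical example: copy one page for a CPU blit. When a mapping's
 * protection is PAGE_KERNEL the map is a plain kmap_atomic(), so no
 * sleeping is allowed between map and unmap. Per the kernel-doc in the
 * patch, unmapping must happen in the reverse order of mapping.
 */
static void example_blit_page(struct page *dst_page, pgprot_t dst_prot,
			      struct page *src_page, pgprot_t src_prot)
{
	void *src = ttm_kmap_atomic_prot(src_page, src_prot);
	void *dst = ttm_kmap_atomic_prot(dst_page, dst_prot);

	memcpy(dst, src, PAGE_SIZE);

	ttm_kunmap_atomic_prot(dst, dst_prot);	/* mapped last, unmapped first */
	ttm_kunmap_atomic_prot(src, src_prot);
}

With any protection other than PAGE_KERNEL the call takes the __ttm_kmap_atomic_prot() path instead (a vmap()-based fallback on architectures without kmap_atomic_prot(), per the #define context visible in the hunk), but the calling convention for the driver stays the same.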