@@ -1581,7 +1581,8 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
if (!bdev->dev_mapping)
return;
-
+ if (bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, &bo->mem);
unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
@@ -84,26 +84,36 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
+ struct ttm_bus_placement pl;
int ret;
void *addr;
*virtual = NULL;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
- if (ret || bus_size == 0)
- return ret;
+ if (bdev->driver->io_mem_reserve) {
+ ret = bdev->driver->io_mem_reserve(bdev, mem, &pl);
+ if (unlikely(ret != 0))
+ return ret;
+ } else {
+ ret = ttm_bo_pci_offset(bdev, mem, &pl.base, &pl.offset, &pl.size);
+ if (unlikely(ret != 0) || pl.size == 0)
+ return ret;
+ pl.is_iomem = (pl.size != 0);
+ }
if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
- addr = (void *)(((u8 *) man->io_addr) + bus_offset);
+ addr = (void *)(pl.base + pl.offset);
else {
if (mem->placement & TTM_PL_FLAG_WC)
- addr = ioremap_wc(bus_base + bus_offset, bus_size);
+ addr = ioremap_wc(pl.base + pl.offset, pl.size);
else
- addr = ioremap_nocache(bus_base + bus_offset, bus_size);
- if (!addr)
+ addr = ioremap_nocache(pl.base + pl.offset, pl.size);
+ if (!addr) {
+ if (bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, mem);
return -ENOMEM;
+ }
}
*virtual = addr;
return 0;
@@ -118,6 +128,8 @@ void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
iounmap(virtual);
+ if (bdev->driver->io_mem_free)
+ bdev->driver->io_mem_free(bdev, mem);
}
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
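For illustration only (not part of the patch), a driver's io_mem_reserve()/io_mem_free() pair for a device with a linearly mapped VRAM aperture could look roughly like the sketch below. All foo_* names and the vram_aper_base field are hypothetical; only the system, TT and VRAM placements are handled, and the offset computation simply mirrors what ttm_bo_pci_offset() does today.

#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>

/* Hypothetical per-device structure; only the fields used here are shown. */
struct foo_device {
	struct ttm_bo_device bdev;
	unsigned long vram_aper_base;	/* bus address of the VRAM BAR */
};

static int foo_io_mem_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem,
			      struct ttm_bus_placement *pl)
{
	struct foo_device *fdev = container_of(bdev, struct foo_device, bdev);

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case TTM_PL_TT:
		/* Backed by system pages, nothing to map over the bus. */
		pl->base = 0;
		pl->offset = 0;
		pl->size = 0;
		pl->is_iomem = false;
		return 0;
	case TTM_PL_VRAM:
		/* Linear aperture: bus address = BAR base + node offset. */
		pl->base = fdev->vram_aper_base;
		pl->offset = mem->mm_node->start << PAGE_SHIFT;
		pl->size = mem->num_pages << PAGE_SHIFT;
		pl->is_iomem = true;
		return 0;
	default:
		return -EINVAL;
	}
}

static void foo_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	/* Nothing to undo for a statically mapped aperture. */
}

With such a pair in place, ttm_mem_reg_ioremap()/ttm_mem_reg_iounmap() above bracket every CPU mapping of the buffer with a reserve/free of the driver's bus window.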
@@ -440,13 +452,12 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
+ struct ttm_bus_placement pl;
int ret;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
+ map->bo = bo;
if (num_pages > bo->num_pages)
return -EINVAL;
if (start_page > bo->num_pages)
@@ -455,16 +466,24 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
#if 0
if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
return -EPERM;
#endif
- ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
- &bus_offset, &bus_size);
- if (ret)
- return ret;
- if (bus_size == 0) {
+ if (bo->bdev->driver->io_mem_reserve) {
+ ret = bo->bdev->driver->io_mem_reserve(bo->bdev, &bo->mem, &pl);
+ if (unlikely(ret != 0))
+ return ret;
+ } else {
+ ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &pl.base,
+ &pl.offset, &pl.size);
+ if (unlikely(ret != 0))
+ return ret;
+ pl.is_iomem = (pl.size != 0);
+ }
+ if (!pl.is_iomem) {
return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
} else {
- bus_offset += start_page << PAGE_SHIFT;
- bus_size = num_pages << PAGE_SHIFT;
- return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+ pl.offset += start_page << PAGE_SHIFT;
+ pl.size = num_pages << PAGE_SHIFT;
+ return ttm_bo_ioremap(bo, pl.base, pl.offset, pl.size, map);
}
}
EXPORT_SYMBOL(ttm_bo_kmap);
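As a usage sketch (again not part of the patch): with the hunk above, the kmap object now remembers its buffer object so that the kunmap path below can hand the io window back through io_mem_free(). A caller mapping the first page of an already reserved, populated bo might look like this; foo_clear_first_page is a hypothetical helper.

#include <linux/io.h>
#include <linux/string.h>
#include <ttm/ttm_bo_api.h>

/* Illustrative only: zero the first page of a reserved bo. */
static int foo_clear_first_page(struct ttm_buffer_object *bo)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *ptr;
	int ret;

	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (unlikely(ret != 0))
		return ret;

	ptr = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)ptr, 0, PAGE_SIZE);
	else
		memset(ptr, 0, PAGE_SIZE);

	ttm_bo_kunmap(&map);
	return 0;
}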
@@ -476,6 +495,8 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
switch (map->bo_kmap_type) {
case ttm_bo_map_iomap:
iounmap(map->virtual);
+ if (map->bo->bdev->driver->io_mem_free)
+ map->bo->bdev->driver->io_mem_free(map->bo->bdev, &map->bo->mem);
break;
case ttm_bo_map_vmap:
vunmap(map->virtual);
@@ -493,35 +514,6 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
- unsigned long dst_offset,
- unsigned long *pfn, pgprot_t *prot)
-{
- struct ttm_mem_reg *mem = &bo->mem;
- struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_offset;
- unsigned long bus_size;
- unsigned long bus_base;
- int ret;
- ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
- &bus_size);
- if (ret)
- return -EINVAL;
- if (bus_size != 0)
- *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
- else
- if (!bo->ttm)
- return -EINVAL;
- else
- *pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
- dst_offset >>
- PAGE_SHIFT));
- *prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
- PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
- return 0;
-}
-
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
@@ -74,9 +74,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
struct ttm_bo_device *bdev = bo->bdev;
- unsigned long bus_base;
- unsigned long bus_offset;
- unsigned long bus_size;
+ struct ttm_bus_placement pl;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
@@ -84,7 +82,6 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
int ret;
int i;
- bool is_iomem;
unsigned long address = (unsigned long)vmf->virtual_address;
int retval = VM_FAULT_NOPAGE;
@@ -104,11 +101,33 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (bdev->driver->fault_reserve_notify)
bdev->driver->fault_reserve_notify(bo);
+ if (bdev->driver->fault_reserve) {
+ ret = bdev->driver->fault_reserve(bo, &pl);
+ switch (ret) {
+ case 0:
+ break;
+ case -EBUSY:
+ set_need_resched();
+ /* fall through */
+ case -ERESTARTSYS:
+ retval = VM_FAULT_NOPAGE;
+ goto out_unlock;
+ default:
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ } else {
+ ret = ttm_bo_pci_offset(bdev, &bo->mem, &pl.base,
+ &pl.offset, &pl.size);
+ if (unlikely(ret != 0)) {
+ retval = VM_FAULT_SIGBUS;
+ goto out_unlock;
+ }
+ pl.is_iomem = (pl.size != 0);
+ }
+
/*
* Wait for buffer data in transit, due to a pipelined
* move.
*/
-
spin_lock(&bo->lock);
if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
ret = ttm_bo_wait(bo, false, true, false);
@@ -122,20 +141,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
spin_unlock(&bo->lock);
- ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base, &bus_offset,
- &bus_size);
- if (unlikely(ret != 0)) {
- retval = VM_FAULT_SIGBUS;
- goto out_unlock;
- }
-
- is_iomem = (bus_size != 0);
-
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) +
bo->vm_node->start - vma->vm_pgoff;
-
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
goto out_unlock;
@@ -154,8 +163,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* vma->vm_page_prot when the object changes caching policy, with
* the correct locks held.
*/
-
- if (is_iomem) {
+ if (pl.is_iomem) {
vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
vma->vm_page_prot);
} else {
@@ -169,12 +177,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Speculatively prefault a number of pages. Only error on
* first page.
*/
-
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
-
- if (is_iomem)
- pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) +
- page_offset;
+ if (pl.is_iomem)
+ pfn = ((pl.base + pl.offset) >> PAGE_SHIFT) + page_offset;
else {
page = ttm_tt_get_page(ttm, page_offset);
if (unlikely(!page && i == 0)) {
@@ -191,14 +196,12 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* Somebody beat us to this PTE or prefaulting to
* an already populated PTE, or prefaulting error.
*/
-
if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
break;
else if (unlikely(ret != 0)) {
retval =
(ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
goto out_unlock;
-
}
address += PAGE_SIZE;
@@ -221,9 +224,10 @@ static void ttm_bo_vm_open(struct vm_area_struct *vma)
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
- struct ttm_buffer_object *bo =
- (struct ttm_buffer_object *)vma->vm_private_data;
+ struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
+ if (bo->bdev->driver->io_mem_free)
+ bo->bdev->driver->io_mem_free(bo->bdev, &bo->mem);
ttm_bo_unref(&bo);
vma->vm_private_data = NULL;
}
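For illustration only, the simplest fault_reserve() a driver could plug into the fault path above is one that merely mirrors the generic ttm_bo_pci_offset() fallback; the interesting case, migrating an unmappable buffer into the visible aperture before filling in the placement, is driver specific and omitted. foo_fault_reserve is a hypothetical name and assumes ttm_bo_pci_offset() is callable from the driver.

/* Minimal sketch: translate the current placement, no migration. */
static int foo_fault_reserve(struct ttm_buffer_object *bo,
			     struct ttm_bus_placement *pl)
{
	int ret;

	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &pl->base,
				&pl->offset, &pl->size);
	if (unlikely(ret != 0))
		return ret;
	pl->is_iomem = (pl->size != 0);
	return 0;
}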
@@ -274,6 +274,7 @@ struct ttm_bo_kmap_obj {
ttm_bo_map_kmap = 3,
ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
} bo_kmap_type;
+ struct ttm_buffer_object *bo;
};
/**
@@ -240,6 +240,23 @@ struct ttm_mem_type_manager {
};
/**
+ * struct ttm_bus_placement
+ *
+ * @base: bus base address
+ * @size: size in bytes
+ * @offset: offset from the base address
+ * @is_iomem: is this io memory?
+ *
+ * Structure indicating the bus placement of an object.
+ */
+struct ttm_bus_placement {
+ unsigned long base;
+ unsigned long size;
+ unsigned long offset;
+ bool is_iomem;
+};
+
+/**
* struct ttm_bo_driver
*
* @create_ttm_backend_entry: Callback to create a struct ttm_backend.
@@ -358,6 +375,19 @@ struct ttm_bo_driver {
* notify the driver that we're about to swap out this bo
*/
void (*swap_notify) (struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback on bo fault; the driver is responsible for filling
+ * in the bus placement and has the opportunity to move the buffer
+ * into visible space.
+ */
+ int (*fault_reserve)(struct ttm_buffer_object *bo,
+ struct ttm_bus_placement *pl);
+ /**
+ * Driver callback when mapping io memory (for bo_move_memcpy, for
+ * instance). TTM will take care of calling io_mem_free whenever the
+ * mapping is no longer used.
+ */
+ int (*io_mem_reserve)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+ struct ttm_bus_placement *pl);
+ void (*io_mem_free)(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);
};
/**
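Finally, a sketch (not part of the patch) of how the hypothetical foo_* callbacks from the earlier sketches would be wired into a driver's ttm_bo_driver; the pre-existing callbacks are elided.

static struct ttm_bo_driver foo_bo_driver = {
	/* create_ttm_backend_entry, evict_flags, move, verify_access,
	 * fault_reserve_notify, swap_notify, ... elided */
	.fault_reserve	= foo_fault_reserve,
	.io_mem_reserve	= foo_io_mem_reserve,
	.io_mem_free	= foo_io_mem_free,
};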