@@ -350,6 +350,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_REVISION 32
#define I915_PARAM_SUBSLICE_TOTAL 33
#define I915_PARAM_EU_TOTAL 34
+#define I915_PARAM_HAS_EXEC_SOFTPIN 37
typedef struct drm_i915_getparam {
int param;
@@ -680,7 +681,8 @@ struct drm_i915_gem_exec_object2 {
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
+#define EXEC_OBJECT_PINNED (1<<3)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PINNED<<1)
__u64 flags;
__u64 rsvd1;
@@ -261,6 +261,15 @@ drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
}
int
+drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
+{
+ if (bo->bufmgr->bo_set_softpin_offset)
+ return bo->bufmgr->bo_set_softpin_offset(bo, offset);
+
+ return -ENODEV;
+}
+
+int
drm_intel_bo_disable_reuse(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_disable_reuse)
@@ -164,6 +164,7 @@ int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
int drm_intel_bo_busy(drm_intel_bo *bo);
int drm_intel_bo_madvise(drm_intel_bo *bo, int madv);
+int drm_intel_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset);
int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
int drm_intel_bo_is_reusable(drm_intel_bo *bo);
@@ -184,6 +184,13 @@ struct _drm_intel_bo_gem {
drm_intel_reloc_target *reloc_target_info;
/** Number of entries in relocs */
int reloc_count;
+ /** Array of BOs that are referenced by this buffer and will be softpinned */
+ drm_intel_bo **softpin_target;
+ /** Number of softpinned BOs that are referenced by this buffer */
+ int softpin_target_count;
+ /** Maximum number of softpinned BOs that are referenced by this buffer */
+ int softpin_target_size;
+
/** Mapped address for the buffer, saved across map/unmap cycles */
void *mem_virtual;
/** GTT virtual address for the buffer, saved across map/unmap cycles */
@@ -237,6 +244,11 @@ struct _drm_intel_bo_gem {
bool is_userptr;
/**
+ * Whether this buffer is softpinned at the offset specified by the user
+ */
+ bool is_softpin;
+
+ /**
* Size in bytes of this buffer and its relocation descendents.
*
* Used to avoid costly tree walking in
@@ -384,8 +396,9 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- if (bo_gem->relocs == NULL) {
- DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
+ if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
+ DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
bo_gem->name);
continue;
}
@@ -395,16 +408,33 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
drm_intel_bo_gem *target_gem =
(drm_intel_bo_gem *) target_bo;
- DBG("%2d: %d (%s)@0x%08llx -> "
+ DBG("%2d: %d %s(%s)@0x%016llx -> "
"%d (%s)@0x%08lx + 0x%08x\n",
i,
- bo_gem->gem_handle, bo_gem->name,
+ bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
+ bo_gem->name,
(unsigned long long)bo_gem->relocs[j].offset,
target_gem->gem_handle,
target_gem->name,
target_bo->offset64,
bo_gem->relocs[j].delta);
}
+
+ for (j = 0; j < bo_gem->softpin_target_count; j++) {
+ drm_intel_bo *target_bo = bo_gem->softpin_target[j];
+ drm_intel_bo_gem *target_gem =
+ (drm_intel_bo_gem *) target_bo;
+ DBG("%2d: %d %s(%s) -> "
+ "%d *(%s)@0x%016lx\n",
+ i,
+ bo_gem->gem_handle,
+ bo_gem->is_softpin ? "*" : "",
+ bo_gem->name,
+ target_gem->gem_handle,
+ target_gem->name,
+ target_bo->offset64);
+ }
}
}
@@ -468,11 +498,18 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int index;
+ int flags = 0;
+
+ if (need_fence)
+ flags |= EXEC_OBJECT_NEEDS_FENCE;
+ if (bo_gem->is_softpin)
+ flags |= EXEC_OBJECT_PINNED;
if (bo_gem->validate_index != -1) {
if (need_fence)
bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
EXEC_OBJECT_NEEDS_FENCE;
+ bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
return;
}
@@ -499,15 +536,12 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
bufmgr_gem->exec2_objects[index].alignment = bo->align;
- bufmgr_gem->exec2_objects[index].offset = 0;
+ bufmgr_gem->exec2_objects[index].offset = bo_gem->is_softpin ?
+ bo->offset64 : 0;
bufmgr_gem->exec_bos[index] = bo;
- bufmgr_gem->exec2_objects[index].flags = 0;
+ bufmgr_gem->exec2_objects[index].flags = flags;
bufmgr_gem->exec2_objects[index].rsvd1 = 0;
bufmgr_gem->exec2_objects[index].rsvd2 = 0;
- if (need_fence) {
- bufmgr_gem->exec2_objects[index].flags |=
- EXEC_OBJECT_NEEDS_FENCE;
- }
bufmgr_gem->exec_count++;
}
@@ -1256,8 +1290,12 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
time);
}
}
+ for (i = 0; i < bo_gem->softpin_target_count; i++)
+ drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
+ time);
bo_gem->reloc_count = 0;
bo_gem->used_as_reloc_target = false;
+ bo_gem->softpin_target_count = 0;
DBG("bo_unreference final: %d (%s)\n",
bo_gem->gem_handle, bo_gem->name);
@@ -1271,6 +1309,11 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
free(bo_gem->relocs);
bo_gem->relocs = NULL;
}
+ if (bo_gem->softpin_target) {
+ free(bo_gem->softpin_target);
+ bo_gem->softpin_target = NULL;
+ bo_gem->softpin_target_size = 0;
+ }
/* Clear any left-over mappings */
if (bo_gem->map_count) {
@@ -1908,14 +1951,6 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
}
- bo_gem->relocs[bo_gem->reloc_count].offset = offset;
- bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
- bo_gem->relocs[bo_gem->reloc_count].target_handle =
- target_bo_gem->gem_handle;
- bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
- bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
- bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
-
bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
if (target_bo != bo)
drm_intel_gem_bo_reference(target_bo);
@@ -1925,21 +1960,70 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
else
bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
+ bo_gem->relocs[bo_gem->reloc_count].offset = offset;
+ bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
+ bo_gem->relocs[bo_gem->reloc_count].target_handle =
+ target_bo_gem->gem_handle;
+ bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
+ bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
+ bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
bo_gem->reloc_count++;
return 0;
}
static int
+drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
+{
+ drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
+ if (bo_gem->has_error)
+ return -ENOMEM;
+
+ if (target_bo_gem->has_error) {
+ bo_gem->has_error = true;
+ return -ENOMEM;
+ }
+
+ if (!target_bo_gem->is_softpin)
+ return -EINVAL;
+ if (target_bo_gem == bo_gem)
+ return -EINVAL;
+
+ if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
+ int new_size = bo_gem->softpin_target_size * 2;
+ if (new_size == 0)
+ new_size = bufmgr_gem->max_relocs;
+
+ bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
+ sizeof(drm_intel_bo *));
+ if (!bo_gem->softpin_target)
+ return -ENOMEM;
+
+ bo_gem->softpin_target_size = new_size;
+ }
+ bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
+ drm_intel_gem_bo_reference(target_bo);
+ bo_gem->softpin_target_count++;
+
+ return 0;
+}
+
+static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
- return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
- read_domains, write_domain,
- !bufmgr_gem->fenced_relocs);
+ if (target_bo_gem->is_softpin)
+ return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
+ else
+ return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
+ read_domains, write_domain,
+ !bufmgr_gem->fenced_relocs);
}
static int
@@ -1972,6 +2056,8 @@ drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
*
* Any further drm_intel_bufmgr_check_aperture_space() queries
* involving this buffer in the tree are undefined after this call.
+ *
+ * This also removes all softpinned targets being referenced by the BO.
*/
void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
@@ -1998,6 +2084,12 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
}
bo_gem->reloc_count = start;
+ for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
+ drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
+ }
+ bo_gem->softpin_target_count = 0;
+
pthread_mutex_unlock(&bufmgr_gem->lock);
}
@@ -2038,7 +2130,7 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int i;
- if (bo_gem->relocs == NULL)
+ if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
return;
for (i = 0; i < bo_gem->reloc_count; i++) {
@@ -2059,6 +2151,17 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
/* Add the target to the validate list */
drm_intel_add_validate_buffer2(target_bo, need_fence);
}
+
+ for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ drm_intel_bo *target_bo = bo_gem->softpin_target[i];
+
+ if (target_bo == bo)
+ continue;
+
+ drm_intel_gem_bo_mark_mmaps_incoherent(bo);
+ drm_intel_gem_bo_process_reloc2(target_bo);
+ drm_intel_add_validate_buffer2(target_bo, false);
+ }
}
@@ -2094,7 +2197,11 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
/* Update the buffer offset */
if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
- DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
+ /* If we're seeing a softpinned object here it means that the kernel
+ * has relocated our object, which indicates a programming error.
+ */
+ assert(!bo_gem->is_softpin);
+ DBG("BO %d (%s) migrated: 0x%016lx -> 0x%016llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset64,
(unsigned long long)bufmgr_gem->exec2_objects[i].offset);
bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
@@ -2418,6 +2525,17 @@ drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
return 0;
}
+static int
+drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
+{
+ drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+ bo_gem->is_softpin = true;
+ bo->offset64 = offset;
+ bo->offset = offset;
+ return 0;
+}
+
drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
@@ -2796,6 +2914,13 @@ _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
return 1;
}
+ for (i = 0; i < bo_gem->softpin_target_count; i++) {
+ if (bo_gem->softpin_target[i] == target_bo)
+ return 1;
+ if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
+ return 1;
+ }
+
return 0;
}
@@ -3252,6 +3377,11 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0);
+ gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
+ ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+ if (ret == 0 && *gp.value > 0)
+ bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
+
if (bufmgr_gem->gen < 4) {
gp.param = I915_PARAM_NUM_FENCES_AVAIL;
gp.value = &bufmgr_gem->available_fences;
@@ -227,6 +227,13 @@ struct _drm_intel_bufmgr {
uint32_t * swizzle_mode);
/**
+ * Set the offset at which this buffer will be softpinned
+ * \param bo Buffer to set the softpin offset for
+ * \param offset Softpin offset
+ */
+ int (*bo_set_softpin_offset) (drm_intel_bo *bo, uint64_t offset);
+
+ /**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
Softpin allows userspace to take greater control of GPU virtual address space and eliminates the need for relocations. It can also be used to mirror addresses between the GPU and CPU (shared virtual memory). Calls to drm_intel_bo_emit_reloc are still required to build the list of drm_i915_gem_exec_objects at exec time, but no entries in relocs are created. Self-relocs don't make any sense for softpinned objects and can indicate a programming error, and are thus forbidden. Softpinned objects are marked with an asterisk in debug dumps.

Cc: Thomas Daniel <thomas.daniel@intel.com>
Cc: Kristian Høgsberg <krh@bitplanet.net>
Cc: Zou Nanhai <nanhai.zou@intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Ben Widawsky <ben@bwidawsk.net>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 include/drm/i915_drm.h    |   4 +-
 intel/intel_bufmgr.c      |   9 +++
 intel/intel_bufmgr.h      |   1 +
 intel/intel_bufmgr_gem.c  | 176 ++++++++++++++++++++++++++++++++++++++++------
 intel/intel_bufmgr_priv.h |   7 ++
 5 files changed, 173 insertions(+), 24 deletions(-)
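
A minimal usage sketch of the new interface, for illustration only. The render node path, buffer sizes, and the pinned address below are assumptions made for the example; a real driver would run its own GPU virtual address allocator and confirm kernel support via I915_PARAM_HAS_EXEC_SOFTPIN (with this patch, drm_intel_bo_set_softpin_offset returns -ENODEV when the bufmgr has no softpin hook).

/* build (assumed): cc example.c $(pkg-config --cflags --libs libdrm_intel libdrm) */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#include <intel_bufmgr.h>
#include <i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed device node */
	if (fd < 0)
		return 1;

	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
	if (!bufmgr) {
		close(fd);
		return 1;
	}

	drm_intel_bo *batch = drm_intel_bo_alloc(bufmgr, "batch", 4096, 4096);
	drm_intel_bo *target = drm_intel_bo_alloc(bufmgr, "target", 4096, 4096);

	/* Pin the target at a GPU virtual address chosen by the caller
	 * (arbitrary example address). */
	if (drm_intel_bo_set_softpin_offset(target, 0x10000000ull) != 0)
		fprintf(stderr, "softpin not supported by this kernel\n");

	/*
	 * Still emit a "reloc" so the target lands on the exec object list;
	 * with a softpinned target no relocation entry is created and the
	 * object will be submitted with EXEC_OBJECT_PINNED at the offset
	 * set above.
	 */
	drm_intel_bo_emit_reloc(batch, 0, target, 0,
				I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);

	drm_intel_bo_unreference(target);
	drm_intel_bo_unreference(batch);
	drm_intel_bufmgr_destroy(bufmgr);
	close(fd);
	return 0;
}

The emit_reloc call here only serves to get the softpinned target onto the exec object list at exec time; the presumed offset never changes, so the kernel is expected to leave the object where userspace pinned it.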