@@ -155,8 +155,6 @@ typedef struct _drm_intel_bufmgr_gem {
} drm_intel_bufmgr_gem;
-#define DRM_INTEL_RELOC_FENCE (1<<0)
-
typedef struct _drm_intel_reloc_target_info {
drm_intel_bo *bo;
int flags;
@@ -201,7 +199,7 @@ struct _drm_intel_bo_gem {
/** Number of entries in relocs */
int reloc_count;
/** Array of BOs that are referenced by this buffer and will be softpinned */
- drm_intel_bo **softpin_target;
+ drm_intel_reloc_target *softpin_target_info;
/** Number softpinned BOs that are referenced by this buffer */
int softpin_target_count;
/** Maximum amount of softpinned BOs that are referenced by this buffer */
@@ -426,7 +424,7 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
- if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
+ if (bo_gem->relocs == NULL && bo_gem->softpin_target_info == NULL) {
DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
bo_gem->is_softpin ? "*" : "",
bo_gem->name);
@@ -454,7 +452,7 @@ drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
}
for (j = 0; j < bo_gem->softpin_target_count; j++) {
- drm_intel_bo *target_bo = bo_gem->softpin_target[j];
+ drm_intel_bo *target_bo = bo_gem->softpin_target_info[j].bo;
drm_intel_bo_gem *target_gem =
(drm_intel_bo_gem *) target_bo;
DBG("%2d: %d %s(%s) -> "
@@ -526,15 +524,16 @@ drm_intel_add_validate_buffer(drm_intel_bo *bo)
}
static void
-drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
+drm_intel_add_validate_buffer2(drm_intel_bo *bo, int flags)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int index;
- int flags = 0;
- if (need_fence)
- flags |= EXEC_OBJECT_NEEDS_FENCE;
+ /* Reject per-bo (softpin/48b) and invalid flags */
+ assert(!(flags & ~(EXEC_OBJECT_NEEDS_FENCE | EXEC_OBJECT_NEEDS_GTT |
+ EXEC_OBJECT_WRITE)));
+
if (bo_gem->use_48b_address_range)
flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
if (bo_gem->is_softpin)
@@ -1326,8 +1325,9 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
}
}
for (i = 0; i < bo_gem->softpin_target_count; i++)
- drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
- time);
+ drm_intel_gem_bo_unreference_locked_timed(bo_gem->
+ softpin_target_info[i].bo,
+ time);
bo_gem->reloc_count = 0;
bo_gem->used_as_reloc_target = false;
bo_gem->softpin_target_count = 0;
@@ -1344,9 +1344,9 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
free(bo_gem->relocs);
bo_gem->relocs = NULL;
}
- if (bo_gem->softpin_target) {
- free(bo_gem->softpin_target);
- bo_gem->softpin_target = NULL;
+ if (bo_gem->softpin_target_info) {
+ free(bo_gem->softpin_target_info);
+ bo_gem->softpin_target_info = NULL;
bo_gem->softpin_target_size = 0;
}
@@ -1991,7 +1991,7 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_gem_bo_reference(target_bo);
if (fenced_command)
bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
- DRM_INTEL_RELOC_FENCE;
+ EXEC_OBJECT_NEEDS_FENCE;
else
bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
@@ -2015,8 +2015,10 @@ drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
}
static int
-drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
+drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo,
+ uint32_t write_domain)
{
+ int flags = 0;
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
@@ -2038,14 +2040,23 @@ drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
if (new_size == 0)
new_size = bufmgr_gem->max_relocs;
- bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
- sizeof(drm_intel_bo *));
- if (!bo_gem->softpin_target)
+ bo_gem->softpin_target_info =
+ realloc(bo_gem->softpin_target_info,
+ new_size * sizeof(drm_intel_reloc_target));
+ if (!bo_gem->softpin_target_info)
return -ENOMEM;
bo_gem->softpin_target_size = new_size;
}
- bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
+ bo_gem->softpin_target_info[bo_gem->softpin_target_count].bo = target_bo;
+
+ if (write_domain)
+ flags |= EXEC_OBJECT_WRITE;
+ if (bufmgr_gem->gen == 6 && write_domain == I915_GEM_DOMAIN_INSTRUCTION)
+ flags |= EXEC_OBJECT_NEEDS_GTT;
+
+ bo_gem->softpin_target_info[bo_gem->softpin_target_count].flags = flags;
+
drm_intel_gem_bo_reference(target_bo);
bo_gem->softpin_target_count++;
@@ -2061,7 +2072,8 @@ drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
if (target_bo_gem->is_softpin)
- return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
+ return drm_intel_gem_bo_add_softpin_target(bo, target_bo,
+ write_domain);
else
return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
read_domains, write_domain,
@@ -2127,7 +2139,8 @@ drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
bo_gem->reloc_count = start;
for (i = 0; i < bo_gem->softpin_target_count; i++) {
- drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
+ drm_intel_bo_gem *target_bo_gem =
+ (drm_intel_bo_gem *) bo_gem->softpin_target_info[i].bo;
drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
}
bo_gem->softpin_target_count = 0;
@@ -2172,12 +2185,11 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int i;
- if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
+ if (bo_gem->relocs == NULL && bo_gem->softpin_target_info == NULL)
return;
for (i = 0; i < bo_gem->reloc_count; i++) {
drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
- int need_fence;
if (target_bo == bo)
continue;
@@ -2187,22 +2199,21 @@ drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
/* Continue walking the tree depth-first. */
drm_intel_gem_bo_process_reloc2(target_bo);
- need_fence = (bo_gem->reloc_target_info[i].flags &
- DRM_INTEL_RELOC_FENCE);
-
/* Add the target to the validate list */
- drm_intel_add_validate_buffer2(target_bo, need_fence);
+ drm_intel_add_validate_buffer2(target_bo,
+ bo_gem->reloc_target_info[i].flags);
}
for (i = 0; i < bo_gem->softpin_target_count; i++) {
- drm_intel_bo *target_bo = bo_gem->softpin_target[i];
+ drm_intel_bo *target_bo = bo_gem->softpin_target_info[i].bo;
if (target_bo == bo)
continue;
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
drm_intel_gem_bo_process_reloc2(target_bo);
- drm_intel_add_validate_buffer2(target_bo, false);
+ drm_intel_add_validate_buffer2(target_bo,
+ bo_gem->softpin_target_info[i].flags);
}
}
@@ -2958,14 +2969,15 @@ _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
if (bo == bo_gem->reloc_target_info[i].bo)
continue;
if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
- target_bo))
+ target_bo))
return 1;
}
for (i = 0; i< bo_gem->softpin_target_count; i++) {
- if (bo_gem->softpin_target[i] == target_bo)
+ if (bo_gem->softpin_target_info[i].bo == target_bo)
return 1;
- if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
+ if (_drm_intel_gem_bo_references(bo_gem->softpin_target_info[i].bo,
+ target_bo))
return 1;
}
Since we're short-circuiting reloc handling by using softpin, we need to pass
additional exec_object2 flags to ensure correct synchronization and to handle
the gen6 case, where some objects need a global GTT mapping.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Kristian Høgsberg Kristensen <kristian.h.kristensen@intel.com>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
 intel/intel_bufmgr_gem.c | 78 ++++++++++++++++++++++++++++--------------------
 1 file changed, 45 insertions(+), 33 deletions(-)
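
For context (not part of the patch): a minimal caller-side sketch of how the new
flag handling is exercised. It assumes the softpin API introduced in the same
series (drm_intel_bo_set_softpin_offset); buffer names, sizes, the GTT offset and
the function name emit_write_to_softpinned_target are illustrative assumptions,
and error handling is omitted for brevity.

/* Hypothetical usage sketch, not part of the patch. */
#include <stdint.h>
#include <i915_drm.h>
#include "intel_bufmgr.h"

static void
emit_write_to_softpinned_target(drm_intel_bufmgr *bufmgr)
{
	drm_intel_bo *batch  = drm_intel_bo_alloc(bufmgr, "batch",  4096, 4096);
	drm_intel_bo *target = drm_intel_bo_alloc(bufmgr, "target", 4096, 4096);

	/* Pin the target at a fixed (illustrative) GTT address; it is now
	 * treated as softpinned by the reloc paths above. */
	drm_intel_bo_set_softpin_offset(target, 0x100000);

	/* Because the target is softpinned, this does not record a classic
	 * relocation; drm_intel_gem_bo_add_softpin_target() is called with the
	 * write domain instead, so the target's exec_object2 entry carries
	 * EXEC_OBJECT_WRITE (plus EXEC_OBJECT_NEEDS_GTT on gen6 for
	 * I915_GEM_DOMAIN_INSTRUCTION writes). */
	drm_intel_bo_emit_reloc(batch, 0, target, 0,
				I915_GEM_DOMAIN_RENDER,
				I915_GEM_DOMAIN_RENDER);

	drm_intel_bo_unreference(target);
	drm_intel_bo_unreference(batch);
}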