diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -631,6 +631,8 @@ struct drm_i915_gem_object {
struct drm_mm_node *stolen;
+ resource_size_t bo_offset;
+
unsigned long scratch;
u64 encode;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -126,6 +126,8 @@ i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
static void
i915_ttm_place_from_region(const struct intel_memory_region *mr,
struct ttm_place *place,
+ resource_size_t offset,
+ resource_size_t size,
unsigned int flags)
{
memset(place, 0, sizeof(*place));
@@ -133,7 +135,10 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place->flags |= TTM_PL_FLAG_CONTIGUOUS;
- if (mr->io_size && mr->io_size < mr->total) {
+ if (offset != I915_BO_INVALID_OFFSET) {
+ place->fpfn = offset >> PAGE_SHIFT;
+ place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
+ } else if (mr->io_size && mr->io_size < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
@@ -155,12 +160,14 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
placement->num_placement = 1;
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
- obj->mm.region, requested, flags);
+ obj->mm.region, requested, obj->bo_offset,
+ obj->base.size, flags);
/* Cache this on object? */
placement->num_busy_placement = num_allowed;
for (i = 0; i < placement->num_busy_placement; ++i)
- i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
+ i915_ttm_place_from_region(obj->mm.placements[i], busy + i,
+ obj->bo_offset, obj->base.size, flags);
if (num_allowed == 0) {
*busy = *requested;
@@ -802,7 +809,8 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
struct ttm_placement placement;
int ret;
- i915_ttm_place_from_region(mr, &requested, flags);
+ i915_ttm_place_from_region(mr, &requested, obj->bo_offset,
+ obj->base.size, flags);
placement.num_placement = 1;
placement.num_busy_placement = 1;
placement.placement = &requested;
@@ -1159,6 +1167,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
+ obj->bo_offset = offset;
+
/* Don't put on a region list until we're either locked or fully initialized. */
obj->mm.region = mem;
INIT_LIST_HEAD(&obj->mm.region_link);
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -12,6 +12,7 @@
#include "intel_region_ttm.h"
+#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h" /* For the funcs/ops export only */
/**
* DOC: TTM support structure
@@ -191,6 +192,7 @@ intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem,
*/
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+ resource_size_t offset,
resource_size_t size,
unsigned int flags)
{
@@ -202,7 +204,10 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
- if (mem->io_size && mem->io_size < mem->total) {
+ if (offset != I915_BO_INVALID_OFFSET) {
+ place.fpfn = offset >> PAGE_SHIFT;
+ place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
+ } else if (mem->io_size && mem->io_size < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h
--- a/drivers/gpu/drm/i915/intel_region_ttm.h
+++ b/drivers/gpu/drm/i915/intel_region_ttm.h
@@ -36,6 +36,7 @@ struct ttm_device_funcs *i915_ttm_driver(void);
#ifdef CONFIG_DRM_I915_SELFTEST
struct ttm_resource *
intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+ resource_size_t offset,
resource_size_t size,
unsigned int flags);
#endif
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -26,6 +26,7 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
int err;
obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
+ obj->bo_offset,
obj->base.size,
obj->flags);
if (IS_ERR(obj->mm.res))
@@ -71,6 +72,8 @@ static int mock_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);
+ obj->bo_offset = offset;
+
obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
For the ttm backend we can use the existing placement fpfn and lpfn fields
to force the allocator to place the object at the requested offset,
potentially evicting whatever currently occupies that spot.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_object_types.h |  2 ++
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c      | 18 ++++++++++++++----
 drivers/gpu/drm/i915/intel_region_ttm.c      |  7 ++++++-
 drivers/gpu/drm/i915/intel_region_ttm.h      |  1 +
 drivers/gpu/drm/i915/selftests/mock_region.c |  3 +++
 5 files changed, 26 insertions(+), 5 deletions(-)
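
Reviewer note, not part of the patch: below is a minimal, hypothetical sketch of
the pinning logic added above, pulled out as a standalone helper purely for
illustration. The helper name pin_place_to_offset() is made up for this note;
struct ttm_place, PAGE_SHIFT, resource_size_t and I915_BO_INVALID_OFFSET are the
existing kernel/i915 symbols the patch relies on (I915_BO_INVALID_OFFSET comes
from gem/i915_gem_region.h, which the intel_region_ttm.c hunk adds an include for).

#include <linux/types.h>
#include <drm/ttm/ttm_placement.h>

/*
 * Illustration only: mirrors the fpfn/lpfn computation done in
 * i915_ttm_place_from_region() and intel_region_ttm_resource_alloc().
 * The caller is expected to have zeroed *place first (as both of those
 * functions do), so fpfn == lpfn == 0 means "no range restriction".
 */
static void pin_place_to_offset(struct ttm_place *place,
				resource_size_t offset,
				resource_size_t size)
{
	if (offset == I915_BO_INVALID_OFFSET)
		return; /* no fixed offset requested */

	/* Constrain the allocation to exactly [offset, offset + size). */
	place->fpfn = offset >> PAGE_SHIFT;
	place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
}

Because lpfn - fpfn is exactly the number of pages the object needs, the
backing allocator has only one window it can hand back, so it either returns
that exact span or has to evict whatever currently sits there, which is the
behaviour described in the message above.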