diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -539,6 +539,19 @@ struct i915_hw_ppgtt {
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
+/* To keep things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * is always <= its object's lifetime, so object refcounting covers us.
+ */
+struct i915_vma {
+ struct i915_address_space *vm;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node node;
+ /* Page-aligned offset, stashed until GTT init (stolen preallocation only) */
+ unsigned long deferred_offset;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
+};
+
struct i915_ctx_hang_stats {
/* This context had batch pending when hang was declared */
unsigned batch_pending;
@@ -1222,8 +1235,9 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
- /** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
+ struct list_head vma_list;
+
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head global_list;
@@ -1351,6 +1365,7 @@ struct drm_i915_gem_object {
static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o)
{
+ BUG_ON(list_empty(&o->vma_list));
return o->gtt_space->start;
}
@@ -1361,6 +1376,7 @@ static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o)
static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o)
{
+ BUG_ON(list_empty(&o->vma_list));
return o->gtt_space->size;
}
@@ -1370,6 +1386,16 @@ static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
o->gtt_space->color = color;
}
+/* This is a temporary helper to ease the transition to real VMAs. If you see
+ * this, you're either reviewing code or bisecting it. */
+static inline struct i915_vma *
+__i915_gem_obj_to_vma(struct drm_i915_gem_object *obj)
+{
+ BUG_ON(!i915_gem_obj_bound(obj));
+ BUG_ON(list_empty(&obj->vma_list));
+ return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
+}
+
/**
* Request queue structure.
*
@@ -1680,6 +1706,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj);
+void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2585,6 +2585,7 @@ int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+ struct i915_vma *vma;
int ret;
if (!i915_gem_obj_bound(obj))
@@ -2622,13 +2623,22 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
list_del(&obj->mm_list);
- list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
+ vma = __i915_gem_obj_to_vma(obj);
+ list_del(&vma->vma_link);
+ /* FIXME: drm_mm_remove_node(&vma->node); */
+ i915_gem_vma_destroy(vma);
+
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
+ /* Since the unbound list is global, only move the object to it
+ * once no VMAs remain. */
+ if (list_empty(&obj->vma_list))
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+
return 0;
}
@@ -3079,8 +3089,12 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
bool mappable, fenceable;
size_t gtt_max = map_and_fenceable ?
dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+ struct i915_vma *vma;
int ret;
+ if (WARN_ON(!list_empty(&obj->vma_list)))
+ return -EBUSY;
+
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
@@ -3124,6 +3138,12 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_unpin_pages(obj);
return -ENOMEM;
}
+ vma = i915_gem_vma_create(obj);
+ if (IS_ERR(vma)) {
+ kfree(node);
+ i915_gem_object_unpin_pages(obj);
+ return PTR_ERR(vma);
+ }
search_free:
ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, node,
@@ -3160,6 +3180,9 @@ search_free:
list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
obj->gtt_space = node;
+ vma->node.start = node->start;
+ vma->node.size = node->size;
+ list_add(&vma->vma_link, &obj->vma_list);
fenceable =
node->size == fence_size &&
@@ -3317,6 +3340,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mm_node *node = NULL;
int ret;
if (obj->cache_level == cache_level)
@@ -3327,7 +3351,12 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
+ if (i915_gem_obj_bound(obj)) {
+ node = obj->gtt_space;
+ BUG_ON(node->start != __i915_gem_obj_to_vma(obj)->node.start);
+ }
+
+ if (!i915_gem_valid_gtt_space(dev, node, cache_level)) {
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
@@ -3872,6 +3901,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
+ INIT_LIST_HEAD(&obj->vma_list);
obj->ops = ops;
@@ -3992,6 +4022,26 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_free(obj);
}
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ vma->vm = i915_gtt_vm;
+ vma->obj = obj;
+
+ return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+ WARN_ON(vma->node.allocated);
+ kfree(vma);
+}
+
int
i915_gem_idle(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,6 +38,8 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
return false;
list_add(&obj->exec_list, unwind);
+ BUG_ON(__i915_gem_obj_to_vma(obj)->node.start !=
+ i915_gem_obj_offset(obj));
return drm_mm_scan_add_block(obj->gtt_space);
}
@@ -48,6 +50,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
+ struct i915_vma *vma;
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -106,7 +109,8 @@ none:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
-
+ vma = __i915_gem_obj_to_vma(obj);
+ BUG_ON(vma->node.start != i915_gem_obj_offset(obj));
ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
@@ -127,6 +131,8 @@ found:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
+ vma = __i915_gem_obj_to_vma(obj);
+ BUG_ON(vma->node.start != i915_gem_obj_offset(obj));
if (drm_mm_scan_remove_block(obj->gtt_space)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -687,6 +687,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
i915_gem_obj_offset(obj), obj->base.size);
BUG_ON((gtt_offset & I915_GTT_RESERVED) == 0);
+ BUG_ON((__i915_gem_obj_to_vma(obj)->deferred_offset
+ & I915_GTT_RESERVED) == 0);
gtt_offset = gtt_offset & ~I915_GTT_RESERVED;
obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
if (!obj->gtt_space) {
@@ -700,6 +702,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
+ /* The VMA was listed at preallocation time; fill in its node now
+ * that the reservation is real. */
+ __i915_gem_obj_to_vma(obj)->node.start = obj->gtt_space->start;
+ __i915_gem_obj_to_vma(obj)->node.size = obj->gtt_space->size;
}
i915_gtt_vm->start = start;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -330,6 +330,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ struct i915_vma *vma;
int ret;
if (dev_priv->gtt.stolen_base == 0)
@@ -368,6 +369,12 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (gtt_offset == -1)
return obj;
+ vma = i915_gem_vma_create(obj);
+ if (IS_ERR(vma)) {
+ drm_gem_object_unreference(&obj->base);
+ return NULL;
+ }
+
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
@@ -376,6 +383,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (drm_mm_initialized(&i915_gtt_vm->mm)) {
obj->gtt_space = kzalloc(sizeof(*obj->gtt_space), GFP_KERNEL);
if (!obj->gtt_space) {
+ i915_gem_vma_destroy(vma);
drm_gem_object_unreference(&obj->base);
return NULL;
}
@@ -383,15 +391,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
gtt_offset, size);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+ i915_gem_vma_destroy(vma);
drm_gem_object_unreference(&obj->base);
kfree(obj->gtt_space);
return NULL;
}
obj->gtt_space->start = gtt_offset;
+ vma->node.start = obj->gtt_space->start;
+ vma->node.size = obj->gtt_space->size;
+ list_add(&vma->vma_link, &obj->vma_list);
} else {
/* NB: Safe because we assert page alignment */
obj->gtt_space = (struct drm_mm_node *)
((uintptr_t)gtt_offset | I915_GTT_RESERVED);
+ vma->deferred_offset = gtt_offset | I915_GTT_RESERVED;
+ /* List the VMA now so __i915_gem_obj_to_vma() works before GTT init */
+ list_add(&vma->vma_link, &obj->vma_list);
}
obj->has_global_gtt_mapping = 1;

Creates the VMA, but leaves the old obj->gtt_space in place. This
primarily just puts the basic infrastructure in place, and helps check
for leaks.

BISECT WARNING: This patch was not meant for bisect. If it does end up
upstream, it should be included in the 3 part series for creating the
VMA.

v2: s/i915_obj/i915_gem_obj (Chris)

v3: Only move an object to the now global unbound list if there are no
more VMAs for the object which are bound into a VM (i.e. the list is
empty).

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---

For reviewers: a few illustrative sketches follow the diffstat; the
example_* functions are made up and are not part of the patch.

 drivers/gpu/drm/i915/i915_drv.h        | 30 ++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c        | 54 ++++++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_evict.c  |  8 ++++-
 drivers/gpu/drm/i915/i915_gem_gtt.c    |  3 ++
 drivers/gpu/drm/i915/i915_gem_stolen.c | 13 ++++++++
 5 files changed, 104 insertions(+), 4 deletions(-)
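
How a call site is expected to behave during the transition, while
obj->gtt_space and the VMA coexist (a sketch under this patch's
declarations; example_obj_start is hypothetical):

static unsigned long example_obj_start(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma = __i915_gem_obj_to_vma(obj);

	/* Until obj->gtt_space goes away, both views must agree. */
	BUG_ON(vma->node.start != i915_gem_obj_offset(obj));
	return vma->node.start;
}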
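
The v3 rule in isolation: an object moves to the global unbound list
only when its last VMA goes away (sketch; example_vma_unbind is
hypothetical):

static void example_vma_unbind(struct drm_i915_gem_object *obj,
			       struct i915_vma *vma)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	list_del(&vma->vma_link);
	i915_gem_vma_destroy(vma);

	/* The unbound list is global, not per-VM, so only the last
	 * unbind moves the object there. */
	if (list_empty(&obj->vma_list))
		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
}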
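
And how a deferred stolen offset is decoded once the GTT exists,
mirroring i915_gem_setup_global_gtt above (sketch; example_deferred_start
is hypothetical):

static unsigned long example_deferred_start(struct i915_vma *vma)
{
	/* Stolen preallocation stashes the offset with the tag set. */
	BUG_ON((vma->deferred_offset & I915_GTT_RESERVED) == 0);
	return vma->deferred_offset & ~I915_GTT_RESERVED;
}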
Creates the VMA, but leaves the old obj->gtt_space in place. This primarily just puts the basic infrastructure in place, and helps check for leaks. BISECT WARNING: This patch was not meant for bisect. If it does end up upstream, it should be included in the 3 part series for creating the VMA. v2: s/i915_obj/i915_gem_obj (Chris) v3: Only move an object to the now global unbound list if there are no more VMAs for the object which are bound into a VM (ie. the list is empty). Signed-off-by: Ben Widawsky <ben@bwidawsk.net> --- drivers/gpu/drm/i915/i915_drv.h | 30 ++++++++++++++++++- drivers/gpu/drm/i915/i915_gem.c | 54 ++++++++++++++++++++++++++++++++-- drivers/gpu/drm/i915/i915_gem_evict.c | 8 ++++- drivers/gpu/drm/i915/i915_gem_gtt.c | 3 ++ drivers/gpu/drm/i915/i915_gem_stolen.c | 13 ++++++++ 5 files changed, 104 insertions(+), 4 deletions(-)