@@ -122,10 +122,14 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (pinned x %d)", obj->pin_count);
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- if (i915_gem_obj_bound(obj))
- seq_printf(m, " (gtt offset: %08lx, size: %08lx)",
- i915_gem_obj_offset(obj),
- i915_gem_obj_size(obj));
+ if (i915_gem_obj_bound_any(obj)) {
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ seq_printf(m, " (gtt offset: %08lx, size: %08lx)",
+ i915_gem_obj_offset(obj, vma->vm),
+ i915_gem_obj_size(obj, vma->vm));
+ }
+ }
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
@@ -159,11 +163,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
switch (list) {
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
- head = &i915_gtt_vm->active_list;
+ head = ggtt_list(active_list);
break;
case INACTIVE_LIST:
seq_printf(m, "Inactive:\n");
- head = &i915_gtt_vm->inactive_list;
+ head = ggtt_list(inactive_list);
break;
default:
mutex_unlock(&dev->struct_mutex);
@@ -176,7 +180,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
describe_obj(m, obj);
seq_printf(m, "\n");
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_size(obj);
+ /* FIXME: Add size of all VMs */
+ total_gtt_size += i915_gem_ggtt_size(obj);
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -186,12 +191,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+/* FIXME: Support multiple VM? */
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += i915_gem_obj_size(obj); \
+ size += i915_gem_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
- mappable_size += i915_gem_obj_size(obj); \
+ mappable_size += i915_gem_ggtt_size(obj); \
++mappable_count; \
} \
} \
@@ -216,17 +222,17 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
dev_priv->mm.object_memory);
size = count = mappable_size = mappable_count = 0;
- count_objects(&i915_gtt_vm->bound_list, gtt_list);
+ count_objects(ggtt_list(bound_list), gtt_list);
seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&i915_gtt_vm->active_list, mm_list);
+ count_objects(ggtt_list(active_list), mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&i915_gtt_vm->inactive_list, mm_list);
+ count_objects(ggtt_list(inactive_list), mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -239,13 +245,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
size = count = mappable_size = mappable_count = 0;
- list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
+ list_for_each_entry(obj, ggtt_list(bound_list), gtt_list) {
if (obj->fault_mappable) {
- size += i915_gem_obj_size(obj);
+ size += i915_gem_ggtt_size(obj);
++count;
}
if (obj->pin_mappable) {
- mappable_size += i915_gem_obj_size(obj);
+ mappable_size += i915_gem_ggtt_size(obj);
++mappable_count;
}
if (obj->madv == I915_MADV_DONTNEED) {
@@ -261,8 +267,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size);
seq_printf(m, "%zu [%lu] gtt total\n",
- i915_gtt_vm->total,
- dev_priv->gtt.mappable_end - i915_gtt_vm->start);
+ dev_priv->gtt.base.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
mutex_unlock(&dev->struct_mutex);
@@ -284,7 +290,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
return ret;
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
+ list_for_each_entry(obj, ggtt_list(bound_list), gtt_list) {
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
@@ -292,7 +298,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
describe_obj(m, obj);
seq_printf(m, "\n");
total_obj_size += obj->base.size;
- total_gtt_size += i915_gem_obj_size(obj);
+ total_gtt_size += i915_gem_ggtt_size(obj);
count++;
}
@@ -338,12 +344,12 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", i915_gem_obj_offset(obj));
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", i915_gem_ggtt_offset(obj));
}
if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj)
- seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", i915_gem_obj_offset(obj));
+ seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", i915_gem_ggtt_offset(obj));
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -1793,10 +1799,11 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
- list_for_each_entry_safe(obj, next, &i915_gtt_vm->inactive_list,
+ /* FIXME: Do this for all vms? */
+ list_for_each_entry_safe(obj, next, ggtt_list(inactive_list),
mm_list)
if (obj->pin_count == 0) {
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, &dev_priv->gtt.base);
if (ret)
goto unlock;
}
@@ -1806,6 +1813,7 @@ i915_drop_caches_set(void *data, u64 val)
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
gtt_list)
if (obj->pages_pin_count == 0) {
+ /* FIXME: Do this for all vms? */
ret = i915_gem_object_put_pages(obj);
if (ret)
goto unlock;
@@ -1361,7 +1361,7 @@ cleanup_gem:
i915_gem_cleanup_ringbuffer(dev);
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
- drm_mm_takedown(&i915_gtt_vm->mm);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
@@ -1782,7 +1782,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- drm_mm_takedown(&i915_gtt_vm->mm);
+ list_del(&dev_priv->gtt.base.global_link);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
+
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
@@ -480,10 +480,6 @@ struct i915_gtt {
unsigned long *mappable_end);
void (*gtt_remove)(struct drm_device *dev);
};
-#define i915_gtt_vm ((struct i915_address_space *) \
- list_first_entry(&dev_priv->vm_list,\
- struct i915_address_space, \
- global_link))
struct i915_hw_ppgtt {
struct i915_address_space base;
@@ -1287,45 +1283,6 @@ struct drm_i915_gem_object {
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
-static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o)
-{
- struct i915_vma *vma;
- BUG_ON(list_empty(&o->vma_list));
- vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
- return vma->node.start;
-}
-
-static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o)
-{
- return !list_empty(&o->vma_list);
-}
-
-static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o)
-{
- struct i915_vma *vma;
- BUG_ON(list_empty(&o->vma_list));
- vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
- return vma->node.size;
-}
-
-static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
- enum i915_cache_level color)
-{
- struct i915_vma *vma;
- BUG_ON(list_empty(&o->vma_list));
- vma = list_first_entry(&o->vma_list, struct i915_vma, vma_link);
- vma->node.color = color;
-}
-
-/* This is a temporary define to help transition us to real VMAs. If you see
- * this, you're either reviewing code, or bisecting it. */
-static inline struct i915_vma *__i915_obj_to_vma(struct drm_i915_gem_object *obj)
-{
- BUG_ON(!i915_gem_obj_bound(obj));
- BUG_ON(list_empty(&obj->vma_list));
- return list_first_entry(&obj->vma_list, struct i915_vma, vma_link);
-}
-
/**
* Request queue structure.
*
@@ -1626,15 +1583,18 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -1664,6 +1624,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
@@ -1766,6 +1727,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
enum i915_cache_level cache_level);
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
@@ -1774,6 +1736,54 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags);
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_cache_level color);
+struct i915_vma *i915_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+/* Some GGTT VM helpers */
+#define ggtt_list(list_name) (&(dev_priv->gtt.base.list_name))
+#define obj_to_ggtt(obj) \
+ &((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base
+static inline bool is_i915_ggtt(struct i915_address_space *vm)
+{
+ return (vm == &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base);
+}
+
+static inline bool i915_gem_obj_bound_ggtt(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_ggtt_offset(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long i915_gem_ggtt_size(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+}
+
+static inline int __must_check
+i915_gem_ggtt_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
+{
+ return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
+ map_and_fenceable, nonblocking);
+}
+#undef obj_to_ggtt
+
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
@@ -1823,7 +1833,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+ struct i915_address_space *vm,
+ int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
@@ -1831,6 +1843,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
+#define I915_INVALID_OFFSET 0x1
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
@@ -39,6 +39,7 @@
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking);
@@ -55,7 +56,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
-static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static long i915_gem_purge(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
@@ -138,7 +140,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return i915_gem_obj_bound(obj) && !obj->active;
+ return i915_gem_obj_bound_any(obj) && !obj->active;
}
int
@@ -179,12 +181,12 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list)
+ list_for_each_entry(obj, ggtt_list(bound_list), gtt_list)
if (obj->pin_count)
- pinned += i915_gem_obj_size(obj);
+ pinned += i915_gem_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
- args->aper_size = i915_gtt_vm->total;
+ args->aper_size = dev_priv->gtt.base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -425,7 +427,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
* anyway again before the next pread happens. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush = 1;
- if (i915_gem_obj_bound(obj)) {
+ if (i915_gem_obj_bound_ggtt(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
@@ -597,7 +599,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_object_pin(obj, 0, true, true);
+ ret = i915_gem_ggtt_pin(obj, 0, true, true);
if (ret)
goto out;
@@ -612,7 +614,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
user_data = to_user_ptr(args->data_ptr);
remain = args->size;
- offset = i915_gem_obj_offset(obj) + args->offset;
+ offset = i915_gem_ggtt_offset(obj) + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -742,7 +744,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* right away and we therefore have to clflush anyway. */
if (obj->cache_level == I915_CACHE_NONE)
needs_clflush_after = 1;
- if (i915_gem_obj_bound(obj)) {
+ if (i915_gem_obj_bound_ggtt(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
@@ -1350,7 +1352,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Now bind it into the GTT if needed */
- ret = i915_gem_object_pin(obj, 0, true, false);
+ ret = i915_gem_ggtt_pin(obj, 0, true, false);
if (ret)
goto unlock;
@@ -1364,7 +1366,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn += (i915_gem_obj_offset(obj) >> PAGE_SHIFT) + page_offset;
+ pfn += (i915_gem_ggtt_offset(obj) >> PAGE_SHIFT) + page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1504,7 +1506,8 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
* offsets on purgeable objects by truncating it and marking it purged,
* which prevents userspace from ever using that object again.
*/
- i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
+ i915_gem_purge(dev_priv, &dev_priv->gtt.base,
+ obj->base.size >> PAGE_SHIFT);
ret = drm_gem_create_mmap_offset(&obj->base);
if (ret != -ENOSPC)
goto out;
@@ -1670,7 +1673,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return 0;
- BUG_ON(i915_gem_obj_bound(obj));
+ BUG_ON(i915_gem_obj_bound_any(obj));
if (obj->pages_pin_count)
return -EBUSY;
@@ -1690,14 +1693,15 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
}
static long
-__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
+__i915_gem_shrink(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm,
+ long target,
bool purgeable_only)
{
struct drm_i915_gem_object *obj, *next;
long count = 0;
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.unbound_list,
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
gtt_list) {
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
i915_gem_object_put_pages(obj) == 0) {
@@ -1707,11 +1711,9 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next,
- &i915_gtt_vm->inactive_list,
- mm_list) {
+ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
- i915_gem_object_unbind(obj) == 0 &&
+ i915_gem_object_unbind(obj, vm) == 0 &&
i915_gem_object_put_pages(obj) == 0) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
@@ -1723,9 +1725,10 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
static long
-i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+i915_gem_purge(struct drm_i915_private *dev_priv, struct i915_address_space *vm,
+ long target)
{
- return __i915_gem_shrink(dev_priv, target, true);
+ return __i915_gem_shrink(dev_priv, vm, target, true);
}
static void
@@ -1785,7 +1788,9 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
- i915_gem_purge(dev_priv, page_count);
+ struct i915_address_space *vm;
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ i915_gem_purge(dev_priv, vm, page_count);
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
}
if (IS_ERR(page)) {
@@ -1867,6 +1872,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
@@ -1883,7 +1889,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj->mm_list, &i915_gtt_vm->active_list);
+ list_move_tail(&obj->mm_list, &vm->active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
@@ -1903,15 +1909,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
static void
-i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
+ list_move_tail(&obj->mm_list, &vm->inactive_list);
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2124,13 +2128,16 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
}
while (!list_empty(&ring->active_list)) {
+ struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object,
ring_list);
- i915_gem_object_move_to_inactive(obj);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ i915_gem_object_move_to_inactive(obj, vm);
+ }
}
}
@@ -2160,6 +2167,7 @@ void i915_gem_reset(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
+ struct i915_address_space *vm;
int i;
for_each_ring(ring, dev_priv, i)
@@ -2170,8 +2178,9 @@ void i915_gem_reset(struct drm_device *dev)
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj, &i915_gtt_vm->inactive_list, mm_list)
- obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ list_for_each_entry(obj, &vm->inactive_list, mm_list)
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
/* The fence registers are invalidated so clear them out */
i915_gem_reset_fences(dev);
@@ -2217,6 +2226,8 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
while (!list_empty(&ring->active_list)) {
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
obj = list_first_entry(&ring->active_list,
@@ -2226,7 +2237,8 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
if (!i915_seqno_passed(seqno, obj->last_read_seqno))
break;
- i915_gem_object_move_to_inactive(obj);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ i915_gem_object_move_to_inactive(obj, vm);
}
if (unlikely(ring->trace_irq_seqno &&
@@ -2472,13 +2484,14 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
* Unbinds an object from the GTT aperture.
*/
int
-i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
struct i915_vma *vma;
int ret;
- if (!i915_gem_obj_bound(obj))
+ if (!i915_gem_obj_bound(obj, vm))
return 0;
if (obj->pin_count)
@@ -2501,7 +2514,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret)
return ret;
- trace_i915_gem_object_unbind(obj);
+ trace_i915_gem_object_unbind(obj, vm);
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
@@ -2516,7 +2529,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
/* Avoid an unnecessary call to unbind on rebind. */
obj->map_and_fenceable = true;
- vma = __i915_obj_to_vma(obj);
+ vma = i915_obj_to_vma(obj, vm);
list_del_init(&vma->vma_link);
drm_mm_remove_node(&vma->node);
i915_gem_vma_destroy(vma);
@@ -2561,11 +2574,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
}
if (obj) {
- u32 size = i915_gem_obj_size(obj);
+ u32 size = i915_gem_ggtt_size(obj);
- val = (uint64_t)((i915_gem_obj_offset(obj) + size - 4096) &
+ val = (uint64_t)((i915_gem_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
- val |= i915_gem_obj_offset(obj) & 0xfffff000;
+ val |= i915_gem_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2585,15 +2598,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
u32 val;
if (obj) {
- u32 size = i915_gem_obj_size(obj);
+ u32 size = i915_gem_ggtt_size(obj);
int pitch_val;
int tile_width;
- WARN((i915_gem_obj_offset(obj) & ~I915_FENCE_START_MASK) ||
+ WARN((i915_gem_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
- (i915_gem_obj_offset(obj) & (size - 1)),
+ (i915_gem_ggtt_offset(obj) & (size - 1)),
"object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- i915_gem_obj_offset(obj), obj->map_and_fenceable, size);
+ i915_gem_ggtt_offset(obj), obj->map_and_fenceable, size);
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
@@ -2604,7 +2617,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- val = i915_gem_obj_offset(obj);
+ val = i915_gem_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
@@ -2629,19 +2642,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
uint32_t val;
if (obj) {
- u32 size = i915_gem_obj_size(obj);
+ u32 size = i915_gem_ggtt_size(obj);
uint32_t pitch_val;
- WARN((i915_gem_obj_offset(obj) & ~I830_FENCE_START_MASK) ||
+ WARN((i915_gem_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
- (i915_gem_obj_offset(obj) & (size - 1)),
+ (i915_gem_ggtt_offset(obj) & (size - 1)),
"object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
- i915_gem_obj_offset(obj), size);
+ i915_gem_ggtt_offset(obj), size);
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- val = i915_gem_obj_offset(obj);
+ val = i915_gem_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
@@ -2938,6 +2951,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
*/
static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
unsigned alignment,
bool map_and_fenceable,
bool nonblocking)
@@ -2946,8 +2960,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
drm_i915_private_t *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
- size_t max = map_and_fenceable ?
- dev_priv->gtt.mappable_end : dev_priv->gtt.base.total;
+ size_t max = map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
struct i915_vma *vma;
int ret;
@@ -2989,20 +3002,23 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- vma = i915_gem_vma_create(obj);
+ /* For now we only ever use 1 vma per object */
+ WARN_ON(!list_empty(&obj->vma_list));
+
+ vma = i915_gem_vma_create(obj, vm);
if (vma == NULL) {
i915_gem_object_unpin_pages(obj);
return -ENOMEM;
}
search_free:
- ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm, &vma->node,
+ ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
obj->cache_level, 0, max,
DRM_MM_CREATE_DEFAULT,
DRM_MM_SEARCH_DEFAULT);
if (ret) {
- ret = i915_gem_evict_something(dev, size, alignment,
+ ret = i915_gem_evict_something(dev, vm, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
@@ -3029,12 +3045,16 @@ search_free:
return ret;
}
- list_move_tail(&obj->gtt_list, &i915_gtt_vm->bound_list);
- list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
- list_add(&vma->vma_link, &obj->vma_list);
+ list_move_tail(&obj->gtt_list, &vm->bound_list);
+ list_add_tail(&obj->mm_list, &vm->inactive_list);
+ /* Keep GGTT vmas first to make debug easier */
+ if (is_i915_ggtt(vm))
+ list_add(&vma->vma_link, &obj->vma_list);
+ else
+ list_add_tail(&vma->vma_link, &obj->vma_list);
- fenceable = i915_gem_obj_size(obj) == fence_size &&
- (i915_gem_obj_offset(obj) & (fence_alignment - 1)) == 0;
+ fenceable = i915_gem_ggtt_size(obj) == fence_size &&
+ (i915_gem_ggtt_offset(obj) & (fence_alignment - 1)) == 0;
mappable =
vma->node.start + obj->base.size <= dev_priv->gtt.mappable_end;
@@ -3042,7 +3062,7 @@ search_free:
obj->map_and_fenceable = mappable && fenceable;
i915_gem_object_unpin_pages(obj);
- trace_i915_gem_object_bind(obj, map_and_fenceable);
+ trace_i915_gem_object_bind(obj, vm, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
}
@@ -3140,7 +3160,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret;
/* Not valid to be called on unbound objects. */
- if (!i915_gem_obj_bound(obj))
+ if (!i915_gem_obj_bound_any(obj))
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3179,12 +3199,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
/* And bump the LRU for this access */
if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
+ list_move_tail(&obj->mm_list, ggtt_list(inactive_list));
return 0;
}
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
@@ -3200,16 +3221,16 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (i915_gem_obj_bound(obj))
- node = &__i915_obj_to_vma(obj)->node;
+ if (i915_gem_obj_bound(obj, vm))
+ node = &(i915_obj_to_vma(obj, vm)->node);
if (!i915_gem_valid_gtt_space(dev, node, cache_level)) {
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, vm);
if (ret)
return ret;
}
- if (i915_gem_obj_bound(obj)) {
+ if (i915_gem_obj_bound(obj, vm)) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -3232,7 +3253,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
i915_ppgtt_bind_object(dev_priv->gtt.aliasing_ppgtt,
obj, cache_level);
- i915_gem_obj_set_color(obj, cache_level);
+ i915_gem_obj_set_color(obj, vm, cache_level);
}
if (cache_level == I915_CACHE_NONE) {
@@ -3292,6 +3313,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_caching *args = data;
+ struct drm_i915_private *dev_priv;
struct drm_i915_gem_object *obj;
enum i915_cache_level level;
int ret;
@@ -3316,8 +3338,10 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
ret = -ENOENT;
goto unlock;
}
+ dev_priv = obj->base.dev->dev_private;
- ret = i915_gem_object_set_cache_level(obj, level);
+ /* FIXME: Add interface for specific VM? */
+ ret = i915_gem_object_set_cache_level(obj, &dev_priv->gtt.base, level);
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3335,6 +3359,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined)
{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
u32 old_read_domains, old_write_domain;
int ret;
@@ -3353,7 +3378,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* of uncaching, which would allow us to flush all the LLC-cached data
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ ret = i915_gem_object_set_cache_level(obj, &dev_priv->gtt.base,
+ I915_CACHE_NONE);
if (ret)
return ret;
@@ -3361,7 +3387,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_pin(obj, alignment, true, false);
+ ret = i915_gem_ggtt_pin(obj, alignment, true, false);
if (ret)
return ret;
@@ -3504,6 +3530,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
@@ -3513,26 +3540,28 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if (i915_gem_obj_bound(obj)) {
- if ((alignment && i915_gem_obj_offset(obj) & (alignment - 1)) ||
+ BUG_ON(map_and_fenceable && !is_i915_ggtt(vm));
+
+ if (i915_gem_obj_bound(obj, vm)) {
+ if ((alignment && i915_gem_obj_offset(obj, vm) & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- i915_gem_obj_offset(obj), alignment,
+ i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable,
obj->map_and_fenceable);
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, vm);
if (ret)
return ret;
}
}
- if (!i915_gem_obj_bound(obj)) {
+ if (!i915_gem_obj_bound(obj, vm)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ ret = i915_gem_object_bind_to_gtt(obj, vm, alignment,
map_and_fenceable,
nonblocking);
if (ret)
@@ -3555,7 +3584,7 @@ void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
- BUG_ON(!i915_gem_obj_bound(obj));
+ BUG_ON(!i915_gem_obj_bound_any(obj));
if (--obj->pin_count == 0)
obj->pin_mappable = false;
@@ -3593,9 +3622,12 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
if (obj->user_pin_count == 0) {
- ret = i915_gem_object_pin(obj, args->alignment, true, false);
+ ret = i915_gem_ggtt_pin(obj, args->alignment, true, false);
if (ret)
goto out;
+ } else {
+ if (!list_is_singular(&obj->vma_list))
+ DRM_DEBUG_DRIVER("Trying to pin an object in multiple VMs\n");
}
obj->user_pin_count++;
@@ -3605,7 +3637,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = i915_gem_obj_offset(obj);
+ args->offset = i915_gem_ggtt_offset(obj);
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3828,6 +3860,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma, *next;
trace_i915_gem_object_destroy(obj);
@@ -3835,15 +3868,19 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_detach_phys_object(dev, obj);
obj->pin_count = 0;
- if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
- bool was_interruptible;
+ /* NB: 0 or 1 elements */
+ WARN_ON(!list_empty(&obj->vma_list) && !list_is_singular(&obj->vma_list));
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ if (WARN_ON(i915_gem_object_unbind(obj, vma->vm) == -ERESTARTSYS)) {
+ bool was_interruptible;
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
- WARN_ON(i915_gem_object_unbind(obj));
+ WARN_ON(i915_gem_object_unbind(obj, vma->vm));
- dev_priv->mm.interruptible = was_interruptible;
+ dev_priv->mm.interruptible = was_interruptible;
+ }
}
obj->pages_pin_count = 0;
@@ -3863,15 +3900,19 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_free(obj);
}
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj)
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ struct i915_vma *vma;
+ BUG_ON(!vm);
+
+ vma = kzalloc(sizeof(*vma), GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
+
INIT_LIST_HEAD(&vma->vma_link);
- vma->vm = i915_gtt_vm;
+ vma->vm = vm;
vma->obj = obj;
return vma;
@@ -4101,10 +4142,10 @@ int i915_gem_init(struct drm_device *dev)
if (intel_enable_ppgtt(dev) && HAS_HW_CONTEXTS(dev)) {
i915_gem_setup_global_gtt(dev, 0, dev_priv->gtt.mappable_end,
- i915_gtt_vm->total, false);
+ dev_priv->gtt.base.total, false);
i915_gem_context_init(dev);
if (dev_priv->hw_contexts_disabled) {
- drm_mm_takedown(&i915_gtt_vm->mm);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
goto ggtt_only;
}
}
@@ -4114,7 +4155,7 @@ ggtt_only:
if (HAS_HW_CONTEXTS(dev))
DRM_DEBUG_DRIVER("Context setup failed\n");
i915_gem_setup_global_gtt(dev, 0, dev_priv->gtt.mappable_end,
- i915_gtt_vm->total, true);
+ dev_priv->gtt.base.total, true);
}
ret = i915_gem_init_hw(dev);
@@ -4165,7 +4206,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- BUG_ON(!list_empty(&i915_gtt_vm->active_list));
+ BUG_ON(!list_empty(ggtt_list(active_list)));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4214,6 +4255,17 @@ init_ring_lists(struct intel_ring_buffer *ring)
INIT_LIST_HEAD(&ring->request_list);
}
+static void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm)
+{
+ vm->dev = dev_priv->dev;
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->bound_list);
+ INIT_LIST_HEAD(&vm->global_link);
+ list_add(&vm->global_link, &dev_priv->vm_list);
+}
+
void
i915_gem_load(struct drm_device *dev)
{
@@ -4226,10 +4278,10 @@ i915_gem_load(struct drm_device *dev)
SLAB_HWCACHE_ALIGN,
NULL);
- INIT_LIST_HEAD(&i915_gtt_vm->active_list);
- INIT_LIST_HEAD(&i915_gtt_vm->inactive_list);
+ INIT_LIST_HEAD(&dev_priv->vm_list);
+ i915_init_vm(dev_priv, &dev_priv->gtt.base);
+
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
- INIT_LIST_HEAD(&i915_gtt_vm->bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
for (i = 0; i < I915_NUM_RINGS; i++)
init_ring_lists(&dev_priv->ring[i]);
@@ -4497,8 +4549,9 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
+ struct i915_address_space *vm;
struct drm_i915_gem_object *obj;
- int nr_to_scan = sc->nr_to_scan;
+ int nr_to_scan;
bool unlock = true;
int cnt;
@@ -4512,24 +4565,99 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
unlock = false;
}
- if (nr_to_scan) {
- nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
- if (nr_to_scan > 0)
- nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
- false);
- if (nr_to_scan > 0)
- i915_gem_shrink_all(dev_priv);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ nr_to_scan = sc->nr_to_scan;
+ if (nr_to_scan) {
+ nr_to_scan -= i915_gem_purge(dev_priv, vm, nr_to_scan);
+ if (nr_to_scan > 0)
+ nr_to_scan -= __i915_gem_shrink(dev_priv, vm, nr_to_scan,
+ false);
+ if (nr_to_scan > 0)
+ i915_gem_shrink_all(dev_priv);
+ }
}
cnt = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &i915_gtt_vm->inactive_list, gtt_list)
- if (obj->pin_count == 0 && obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ list_for_each_entry(obj, &vm->inactive_list, gtt_list)
+ if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+ cnt += obj->base.size >> PAGE_SHIFT;
if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
}
+
+/* All the new VM stuff */
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm)
+ return vma->node.start;
+ }
+ return I915_INVALID_OFFSET;
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+ return !list_empty(&o->vma_list);
+}
+
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm) {
+ return true;
+ }
+ }
+ return false;
+}
+
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm)
+ return vma->node.size;
+ }
+ return 0;
+}
+
+void i915_gem_obj_set_color(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm,
+ enum i915_cache_level color)
+{
+ struct i915_vma *vma;
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm) {
+ vma->node.color = color;
+ return;
+ }
+ }
+
+ WARN(1, "Couldn't set color for VM %p\n", vm);
+}
+
+struct i915_vma *i915_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma;
+
+ return NULL;
+}
@@ -156,6 +156,7 @@ create_hw_context(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
+ &dev_priv->gtt.base,
I915_CACHE_LLC_MLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
@@ -220,7 +221,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* may not be available. To avoid this we always pin the
* default context.
*/
- ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+ ret = i915_gem_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
@@ -368,7 +369,7 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_offset(new_context->obj) |
+ intel_ring_emit(ring, i915_gem_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -389,6 +390,7 @@ mi_set_context(struct intel_ring_buffer *ring,
static int do_switch(struct i915_hw_context *to)
{
struct intel_ring_buffer *ring = to->ring;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct i915_hw_context *from = ring->last_context;
u32 hw_flags = 0;
int ret;
@@ -398,7 +400,7 @@ static int do_switch(struct i915_hw_context *to)
if (from == to)
return 0;
- ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+ ret = i915_gem_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@@ -435,7 +437,7 @@ static int do_switch(struct i915_hw_context *to)
*/
if (from != NULL) {
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_gem_object_move_to_active(from->obj, ring);
+ i915_gem_object_move_to_active(from->obj, &dev_priv->gtt.base, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -32,20 +32,18 @@
#include "i915_trace.h"
static bool
-mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+mark_free(struct i915_vma *vma, struct list_head *unwind)
{
- struct i915_vma *vma = __i915_obj_to_vma(obj);
-
- if (obj->pin_count)
+ if (vma->obj->pin_count)
return false;
- list_add(&obj->exec_list, unwind);
+ list_add(&vma->obj->exec_list, unwind);
return drm_mm_scan_add_block(&vma->node);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, unsigned cache_level,
+i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+ int min_size, unsigned alignment, unsigned cache_level,
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -81,16 +79,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
INIT_LIST_HEAD(&unwind_list);
if (mappable)
- drm_mm_init_scan_with_range(&i915_gtt_vm->mm, min_size,
- alignment, cache_level, 0,
+ drm_mm_init_scan_with_range(&vm->mm, min_size, alignment,
+ cache_level, 0,
dev_priv->gtt.mappable_end);
else
- drm_mm_init_scan(&i915_gtt_vm->mm, min_size, alignment,
- cache_level);
+ drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj, &i915_gtt_vm->inactive_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ list_for_each_entry(obj, &vm->inactive_list, mm_list) {
+ struct i915_vma *vma = i915_obj_to_vma(obj, vm);
+ if (mark_free(vma, &unwind_list))
goto found;
}
@@ -98,8 +96,9 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto none;
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ list_for_each_entry(obj, &vm->active_list, mm_list) {
+ struct i915_vma *vma = i915_obj_to_vma(obj, vm);
+ if (mark_free(vma, &unwind_list))
goto found;
}
@@ -110,7 +109,7 @@ none:
struct drm_i915_gem_object,
exec_list);
- vma = __i915_obj_to_vma(obj);
+ vma = i915_obj_to_vma(obj, vm);
ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
@@ -132,7 +131,7 @@ found:
obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object,
exec_list);
- vma = __i915_obj_to_vma(obj);
+ vma = i915_obj_to_vma(obj, vm);
if (drm_mm_scan_remove_block(&vma->node)) {
list_move(&obj->exec_list, &eviction_list);
drm_gem_object_reference(&obj->base);
@@ -147,7 +146,7 @@ found:
struct drm_i915_gem_object,
exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, vm);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
@@ -161,11 +160,17 @@ i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
- bool lists_empty;
+ struct i915_address_space *vm;
+ bool lists_empty = true;
int ret;
- lists_empty = (list_empty(&i915_gtt_vm->inactive_list) &&
- list_empty(&i915_gtt_vm->active_list));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ lists_empty = (list_empty(&vm->inactive_list) &&
+ list_empty(&vm->active_list));
+ if (!lists_empty)
+ break;
+ }
+
if (lists_empty)
return -ENOSPC;
@@ -182,10 +187,12 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry_safe(obj, next,
- &i915_gtt_vm->inactive_list, mm_list)
- if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_unbind(obj));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
+ if (obj->pin_count == 0)
+ WARN_ON(i915_gem_object_unbind(obj, vm));
+ }
+
return 0;
}
@@ -174,7 +174,8 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *reloc)
+ struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@@ -188,7 +189,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -ENOENT;
target_i915_obj = to_intel_bo(target_obj);
- target_offset = i915_gem_obj_offset(target_i915_obj);
+ target_offset = i915_gem_obj_offset(target_i915_obj, vm);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@@ -280,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return ret;
/* Map the page containing the relocation we're going to perform. */
- reloc->offset += i915_gem_obj_offset(obj);
+ reloc->offset += i915_gem_obj_offset(obj, vm);
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
@@ -297,7 +298,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct eb_objects *eb)
+ struct eb_objects *eb,
+ struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
@@ -321,7 +323,7 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r, vm);
if (ret)
return ret;
@@ -344,13 +346,14 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *relocs)
+ struct drm_i915_gem_relocation_entry *relocs,
+ struct i915_address_space *vm)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i], vm);
if (ret)
return ret;
}
@@ -359,7 +362,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_objects *eb,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -373,7 +377,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
*/
pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
- ret = i915_gem_execbuffer_relocate_object(obj, eb);
+ ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
if (ret)
break;
}
@@ -395,6 +399,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
+ struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -409,7 +414,7 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
+ ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable, false);
if (ret)
return ret;
@@ -436,8 +441,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != i915_gem_obj_offset(obj)) {
- entry->offset = i915_gem_obj_offset(obj);
+ if (entry->offset != i915_gem_obj_offset(obj, vm)) {
+ entry->offset = i915_gem_obj_offset(obj, vm);
*need_reloc = true;
}
@@ -458,7 +463,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
- if (!i915_gem_obj_bound(obj))
+ if (!i915_gem_obj_bound_any(obj))
return;
entry = obj->exec_entry;
@@ -475,6 +480,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
+ struct i915_address_space *vm,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -530,7 +536,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
- if (!i915_gem_obj_bound(obj))
+ if (!i915_gem_obj_bound(obj, vm))
continue;
need_fence =
@@ -539,22 +545,24 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
+ BUG_ON((need_mappable || need_fence) && !is_i915_ggtt(vm));
+
if ((entry->alignment &&
- i915_gem_obj_offset(obj) & (entry->alignment - 1)) ||
+ i915_gem_obj_offset(obj, vm) & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
- ret = i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj, vm);
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- if (i915_gem_obj_bound(obj))
+ if (i915_gem_obj_bound(obj, vm))
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
@@ -578,7 +586,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
@@ -662,14 +671,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
- reloc + reloc_offset[offset]);
+ reloc + reloc_offset[offset],
+ vm);
if (ret)
goto err;
}
@@ -768,6 +778,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -782,7 +793,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- i915_gem_object_move_to_active(obj, ring);
+ i915_gem_object_move_to_active(obj, vm, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
@@ -835,7 +846,8 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
@@ -988,17 +1000,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
- ret = i915_gem_execbuffer_relocate(eb);
+ ret = i915_gem_execbuffer_relocate(eb, vm);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec);
+ eb, exec, vm);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1049,7 +1061,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- exec_start = i915_gem_obj_offset(batch_obj) + args->batch_start_offset;
+ exec_start = i915_gem_obj_offset(batch_obj, vm) + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
@@ -1074,7 +1086,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&eb->objects, ring);
+ i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
@@ -1095,6 +1107,7 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1150,7 +1163,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
+ &dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
@@ -1176,6 +1190,7 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
@@ -1206,7 +1221,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
- ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
+ &dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
@@ -276,12 +276,12 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* multiplied by page size. We allocate at the top of the GTT to avoid
* fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&i915_gtt_vm->mm));
- ret = drm_mm_insert_node_in_range_generic(&i915_gtt_vm->mm,
+ BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
dev_priv->gtt.mappable_end,
- i915_gtt_vm->total,
+ dev_priv->gtt.base.total,
DRM_MM_TOPDOWN);
if (ret)
return ret;
@@ -376,6 +376,8 @@ int i915_gem_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
ppgtt->base.total);
+ /* i915_init_vm(dev_priv, &ppgtt->base) */
+
return ret;
}
@@ -383,17 +385,26 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
- i915_gem_obj_offset(obj) >> PAGE_SHIFT,
- cache_level);
+ struct i915_address_space *vm = &ppgtt->base;
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
+ unsigned long obj_offset;
+
+ /* for now ppgtt is always aliasing PPGTT */
+ if (ppgtt == dev_priv->gtt.aliasing_ppgtt)
+ obj_offset = i915_gem_ggtt_offset(obj);
+ else
+ BUG_ON("Multiple VMs not yet supported\n");
+ vm->insert_entries(vm, obj->pages,
+ obj_offset >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
- ppgtt->base.clear_range(&ppgtt->base,
- i915_gem_obj_offset(obj) >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ struct i915_address_space *vm = &ppgtt->base;
+ vm->clear_range(vm, i915_gem_obj_offset(obj, vm) >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
}
extern int intel_iommu_gfx_mapped;
@@ -437,17 +448,17 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- i915_gtt_vm->clear_range(&dev_priv->gtt.base,
- i915_gtt_vm->start / PAGE_SIZE,
- i915_gtt_vm->total / PAGE_SIZE);
+ gtt_vm->clear_range(&dev_priv->gtt.base, gtt_vm->start / PAGE_SIZE,
+ gtt_vm->total / PAGE_SIZE);
if (dev_priv->gtt.aliasing_ppgtt)
gen6_write_pdes(dev_priv->gtt.aliasing_ppgtt);
- list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
+ list_for_each_entry(obj, &gtt_vm->bound_list, gtt_list) {
i915_gem_clflush_object(obj);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
@@ -558,10 +569,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
+ uint32_t obj_offset = i915_gem_obj_offset(obj, gtt_vm);
- i915_gtt_vm->insert_entries(&dev_priv->gtt.base, obj->pages,
- i915_gem_obj_offset(obj) >> PAGE_SHIFT,
- cache_level);
+ gtt_vm->insert_entries(gtt_vm, obj->pages, obj_offset >> PAGE_SHIFT,
+ cache_level);
obj->has_global_gtt_mapping = 1;
}
@@ -570,10 +582,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
+ uint32_t obj_offset = i915_gem_obj_offset(obj, gtt_vm);
- i915_gtt_vm->clear_range(&dev_priv->gtt.base,
- i915_gem_obj_offset(obj) >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ gtt_vm->clear_range(gtt_vm, obj_offset >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
@@ -640,7 +653,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -648,53 +662,53 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
if (!guard_page)
- drm_mm_init(&i915_gtt_vm->mm, start, end - start);
+ drm_mm_init(&gtt_vm->mm, start, end - start);
else
- drm_mm_init(&i915_gtt_vm->mm, start, end - start - PAGE_SIZE);
+ drm_mm_init(&gtt_vm->mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
- i915_gtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+ gtt_vm->mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
- list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
- struct i915_vma *vma = __i915_obj_to_vma(obj);
+ list_for_each_entry(obj, &gtt_vm->bound_list, gtt_list) {
+ struct i915_vma *vma = i915_obj_to_vma(obj, gtt_vm);
uintptr_t gtt_offset = (uintptr_t)vma->deferred_offset;
int ret;
DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
- i915_gem_obj_offset(obj), obj->base.size);
+ i915_gem_obj_offset(obj, gtt_vm), obj->base.size);
BUG_ON((gtt_offset & I915_GTT_RESERVED) == 0);
gtt_offset = gtt_offset & ~I915_GTT_RESERVED;
- ret = drm_mm_create_block(&i915_gtt_vm->mm,
+ ret = drm_mm_create_block(&gtt_vm->mm,
&vma->node,
gtt_offset,
obj->base.size);
if (ret)
DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
- list_add(&__i915_obj_to_vma(obj)->vma_link, &obj->vma_list);
+ list_add(&vma->vma_link, &obj->vma_list);
}
- i915_gtt_vm->start = start;
- i915_gtt_vm->total = end - start;
+ gtt_vm->start = start;
+ gtt_vm->total = end - start;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &i915_gtt_vm->mm,
- hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &gtt_vm->mm, hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- i915_gtt_vm->clear_range(i915_gtt_vm, hole_start / PAGE_SIZE,
- (hole_end-hole_start) / PAGE_SIZE);
+ gtt_vm->clear_range(gtt_vm, hole_start / PAGE_SIZE,
+ (hole_end-hole_start) / PAGE_SIZE);
}
/* And finally clear the reserved guard page (if exists) */
if (guard_page)
- i915_gtt_vm->clear_range(i915_gtt_vm, end / PAGE_SIZE - 1, 1);
+ gtt_vm->clear_range(gtt_vm, end / PAGE_SIZE - 1, 1);
}
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
struct page *page;
dma_addr_t dma_addr;
@@ -712,8 +726,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
- i915_gtt_vm->scratch.page = page;
- i915_gtt_vm->scratch.addr = dma_addr;
+ gtt_vm->scratch.page = page;
+ gtt_vm->scratch.addr = dma_addr;
return 0;
}
@@ -721,12 +735,13 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
- set_pages_wb(i915_gtt_vm->scratch.page, 1);
- pci_unmap_page(dev->pdev, i915_gtt_vm->scratch.addr,
+ set_pages_wb(gtt_vm->scratch.page, 1);
+ pci_unmap_page(dev->pdev, gtt_vm->scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(i915_gtt_vm->scratch.page);
- __free_page(i915_gtt_vm->scratch.page);
+ put_page(gtt_vm->scratch.page);
+ __free_page(gtt_vm->scratch.page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -750,6 +765,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
phys_addr_t gtt_bus_addr;
unsigned int gtt_size;
u16 snb_gmch_ctl;
@@ -789,8 +805,8 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (ret)
DRM_ERROR("Scratch setup failed\n");
- i915_gtt_vm->clear_range = gen6_ggtt_clear_range;
- i915_gtt_vm->insert_entries = gen6_ggtt_insert_entries;
+ gtt_vm->clear_range = gen6_ggtt_clear_range;
+ gtt_vm->insert_entries = gen6_ggtt_insert_entries;
return ret;
}
@@ -809,6 +825,7 @@ static int i915_gmch_probe(struct drm_device *dev,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
@@ -820,8 +837,8 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- i915_gtt_vm->clear_range = i915_ggtt_clear_range;
- i915_gtt_vm->insert_entries = i915_ggtt_insert_entries;
+ gtt_vm->clear_range = i915_ggtt_clear_range;
+ gtt_vm->insert_entries = i915_ggtt_insert_entries;
return 0;
}
@@ -320,6 +320,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *gtt_vm = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
@@ -358,7 +359,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
return NULL;
}
- vma = i915_gem_vma_create(obj);
+ vma = i915_gem_vma_create(obj, gtt_vm);
if (!vma) {
drm_gem_object_unreference(&obj->base);
return NULL;
@@ -368,9 +369,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
* setting up the GTT space. The actual reservation will occur
* later.
*/
- if (drm_mm_initialized(&i915_gtt_vm->mm)) {
- ret = drm_mm_create_block(&i915_gtt_vm->mm, &vma->node,
- gtt_offset, size);
+ if (drm_mm_initialized(&gtt_vm->mm)) {
+ ret = drm_mm_create_block(&gtt_vm->mm, &vma->node, gtt_offset,
+ size);
if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
i915_gem_vma_destroy(vma);
@@ -383,8 +384,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
obj->has_global_gtt_mapping = 1;
- list_add_tail(&obj->gtt_list, &i915_gtt_vm->bound_list);
- list_add_tail(&obj->mm_list, &i915_gtt_vm->inactive_list);
+ list_add_tail(&obj->gtt_list, &gtt_vm->bound_list);
+ list_add_tail(&obj->mm_list, &gtt_vm->inactive_list);
return obj;
}
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
- if (i915_gem_obj_offset(obj) & ~I915_FENCE_START_MASK)
+ if (i915_gem_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
- if (i915_gem_obj_offset(obj) & ~I830_FENCE_START_MASK)
+ if (i915_gem_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
- if (i915_gem_obj_size(obj) != size)
+ if (i915_gem_ggtt_size(obj) != size)
return false;
- if (i915_gem_obj_offset(obj) & (size - 1))
+ if (i915_gem_ggtt_offset(obj) & (size - 1))
return false;
return true;
@@ -358,8 +358,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
* whilst executing a fenced command for an untiled object.
*/
- obj->map_and_fenceable = !i915_gem_obj_bound(obj) ||
- (i915_gem_obj_offset(obj) +
+ obj->map_and_fenceable = !i915_gem_obj_bound_ggtt(obj) ||
+ (i915_gem_ggtt_offset(obj) +
obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
@@ -369,8 +369,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
- if (i915_gem_obj_offset(obj) & (unfenced_alignment - 1))
- ret = i915_gem_object_unbind(obj);
+ if (i915_gem_ggtt_offset(obj) & (unfenced_alignment - 1))
+ ret = i915_gem_object_unbind(obj, &dev_priv->gtt.base);
}
if (ret == 0) {
@@ -1474,7 +1474,8 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
if (dst == NULL)
return NULL;
- reloc_offset = i915_gem_obj_offset(src);
+ /* FIXME: must handle per faulty VM */
+ reloc_offset = i915_gem_ggtt_offset(src);
for (i = 0; i < num_pages; i++) {
unsigned long flags;
void *d;
@@ -1526,7 +1527,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv,
reloc_offset += PAGE_SIZE;
}
dst->page_count = num_pages;
- dst->gtt_offset = i915_gem_obj_offset(src);
+ dst->gtt_offset = i915_gem_ggtt_offset(src);
return dst;
@@ -1578,7 +1579,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->name = obj->base.name;
err->rseqno = obj->last_read_seqno;
err->wseqno = obj->last_write_seqno;
- err->gtt_offset = i915_gem_obj_offset(obj);
+ /* FIXME: plumb the actual context into here to pull the right VM */
+ err->gtt_offset = i915_gem_ggtt_offset(obj);
err->read_domains = obj->base.read_domains;
err->write_domain = obj->base.write_domain;
err->fence_reg = obj->fence_reg;
@@ -1672,17 +1674,20 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
u32 acthd = I915_READ(ACTHD);
+ if (WARN_ON(HAS_HW_CONTEXTS(dev_priv->dev)))
+ return NULL;
+
if (WARN_ON(ring->id != RCS))
return NULL;
obj = ring->private;
- if (acthd >= i915_gem_obj_offset(obj) &&
- acthd < i915_gem_obj_offset(obj) + obj->base.size)
+ if (acthd >= i915_gem_ggtt_offset(obj) &&
+ acthd < i915_gem_ggtt_offset(obj) + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
seqno = ring->get_seqno(ring, false);
- list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list) {
+ list_for_each_entry(obj, ggtt_list(active_list), mm_list) {
if (obj->ring != ring)
continue;
@@ -1757,8 +1762,9 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
if (ring->id != RCS || !error->ccid)
return;
- list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list) {
- if ((error->ccid & PAGE_MASK) == i915_gem_obj_offset(obj)) {
+ list_for_each_entry(obj, ggtt_list(bound_list), gtt_list) {
+ if ((error->ccid & PAGE_MASK) ==
+ i915_gem_ggtt_offset(obj)) {
ering->ctx = i915_error_object_create_sized(dev_priv,
obj, 1);
}
@@ -1891,10 +1897,10 @@ static void i915_capture_error_state(struct drm_device *dev)
error->pinned_bo = NULL;
i = 0;
- list_for_each_entry(obj, &i915_gtt_vm->active_list, mm_list)
+ list_for_each_entry(obj, ggtt_list(active_list), mm_list)
i++;
error->active_bo_count = i;
- list_for_each_entry(obj, &i915_gtt_vm->bound_list, gtt_list)
+ list_for_each_entry(obj, ggtt_list(bound_list), gtt_list)
if (obj->pin_count)
i++;
error->pinned_bo_count = i - error->active_bo_count;
@@ -1913,14 +1919,13 @@ static void i915_capture_error_state(struct drm_device *dev)
error->active_bo_count =
capture_active_bo(error->active_bo,
error->active_bo_count,
- &i915_gtt_vm->active_list);
+ ggtt_list(active_list));
if (error->pinned_bo)
error->pinned_bo_count =
capture_pinned_bo(error->pinned_bo,
error->pinned_bo_count,
- &i915_gtt_vm->bound_list);
-
+ ggtt_list(bound_list));
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev);
@@ -2111,10 +2116,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
- i915_gem_obj_offset(obj);
+ i915_gem_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
- stall_detected = I915_READ(dspaddr) == (i915_gem_obj_offset(obj) +
+ stall_detected = I915_READ(dspaddr) == (i915_gem_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -34,11 +34,13 @@ TRACE_EVENT(i915_gem_object_create,
);
TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
- TP_ARGS(obj, mappable),
+ TP_PROTO(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm, bool mappable),
+ TP_ARGS(obj, vm, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
__field(bool, mappable)
@@ -46,8 +48,8 @@ TRACE_EVENT(i915_gem_object_bind,
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = i915_gem_obj_offset(obj);
- __entry->size = i915_gem_obj_size(obj);
+ __entry->offset = i915_gem_obj_offset(obj, vm);
+ __entry->size = i915_gem_obj_size(obj, vm);
__entry->mappable = mappable;
),
@@ -57,19 +59,21 @@ TRACE_EVENT(i915_gem_object_bind,
);
TRACE_EVENT(i915_gem_object_unbind,
- TP_PROTO(struct drm_i915_gem_object *obj),
- TP_ARGS(obj),
+ TP_PROTO(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm),
+ TP_ARGS(obj, vm),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->offset = i915_gem_obj_offset(obj);
- __entry->size = i915_gem_obj_size(obj);
+ __entry->offset = i915_gem_obj_offset(obj, vm);
+ __entry->size = i915_gem_obj_size(obj, vm);
),
TP_printk("obj=%p, offset=%08x size=%x",
@@ -2062,18 +2062,18 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
}
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_offset(obj), linear_offset, x, y,
+ i915_gem_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
- i915_gem_obj_offset(obj) +
+ i915_gem_ggtt_offset(obj) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
I915_WRITE(DSPADDR(plane),
- i915_gem_obj_offset(obj) + linear_offset);
+ i915_gem_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
return 0;
@@ -2154,11 +2154,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
linear_offset -= intel_crtc->dspaddr_offset;
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- i915_gem_obj_offset(obj), linear_offset, x, y,
+ i915_gem_ggtt_offset(obj), linear_offset, x, y,
fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
- i915_gem_obj_offset(obj)+intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj)+intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -6615,7 +6615,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin;
}
- addr = i915_gem_obj_offset(obj);
+ addr = i915_gem_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
@@ -7322,7 +7322,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
@@ -7364,7 +7364,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
@@ -7404,7 +7404,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- (i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset) |
+ (i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7448,7 +7448,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
intel_ring_emit(ring,
- i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7514,7 +7514,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring,
- i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset);
+ i915_gem_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
@@ -138,11 +138,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_offset(obj);
+ info->fix.smem_start = dev->mode_config.fb_base + i915_gem_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_offset(obj),
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_ggtt_offset(obj),
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -167,7 +167,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
- i915_gem_obj_offset(obj), obj);
+ i915_gem_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex);
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
- i915_gem_obj_offset(overlay->reg_bo));
+ i915_gem_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(i915_gem_obj_offset(new_bo) + params->offset_Y,
+ iowrite32(i915_gem_ggtt_offset(new_bo) + params->offset_Y,
&regs->OBUF_0Y);
ostride = params->stride_Y;
@@ -755,9 +755,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(i915_gem_obj_offset(new_bo) + params->offset_U,
+ iowrite32(i915_gem_ggtt_offset(new_bo) + params->offset_U,
&regs->OBUF_0U);
- iowrite32(i915_gem_obj_offset(new_bo) + params->offset_V,
+ iowrite32(i915_gem_ggtt_offset(new_bo) + params->offset_V,
&regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
@@ -1353,12 +1353,12 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+ ret = i915_gem_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = i915_gem_obj_offset(reg_bo);
+ overlay->flip_addr = i915_gem_ggtt_offset(reg_bo);
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1437,7 +1437,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
overlay->reg_bo->phys_obj->handle->vaddr;
return io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- i915_gem_obj_offset(overlay->reg_bo));
+ i915_gem_ggtt_offset(overlay->reg_bo));
}
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
@@ -1468,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = i915_gem_obj_offset(overlay->reg_bo);
+ error->base = i915_gem_ggtt_offset(overlay->reg_bo);
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
@@ -217,7 +217,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_offset(obj) | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -2270,7 +2270,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_object_pin(ctx, 4096, true, false);
+ ret = i915_gem_ggtt_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -3047,7 +3047,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, i915_gem_obj_offset(dev_priv->ips.renderctx) |
+ intel_ring_emit(ring, i915_gem_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -3070,7 +3070,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
- I915_WRITE(PWRCTXA, i915_gem_obj_offset(dev_priv->ips.pwrctx) |
+ I915_WRITE(PWRCTXA, i915_gem_ggtt_offset(dev_priv->ips.pwrctx) |
PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
@@ -400,14 +400,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(ring, i915_gem_obj_offset(obj));
+ I915_WRITE_START(ring, i915_gem_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == i915_gem_obj_offset(obj) &&
+ I915_READ_START(ring) == i915_gem_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
@@ -439,6 +439,7 @@ out:
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct pipe_control *pc;
struct drm_i915_gem_object *obj;
int ret;
@@ -457,13 +458,14 @@ init_pipe_control(struct intel_ring_buffer *ring)
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_level(obj, &dev_priv->gtt.base,
+ I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true, false);
+ ret = i915_gem_ggtt_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
- pc->gtt_offset = i915_gem_obj_offset(obj);
+ pc->gtt_offset = i915_gem_ggtt_offset(obj);
pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL)
goto err_unpin;
@@ -1042,7 +1044,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_advance(ring);
} else {
struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = i915_gem_obj_offset(obj);
+ u32 cs_offset = i915_gem_ggtt_offset(obj);
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1110,6 +1112,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
static int init_status_page(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
int ret;
@@ -1120,14 +1123,15 @@ static int init_status_page(struct intel_ring_buffer *ring)
goto err;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_level(obj, &dev_priv->gtt.base,
+ I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true, false);
+ ret = i915_gem_ggtt_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
- ring->status_page.gfx_addr = i915_gem_obj_offset(obj);
+ ring->status_page.gfx_addr = i915_gem_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
@@ -1212,7 +1216,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+ ret = i915_gem_ggtt_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@@ -1221,7 +1225,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_offset(obj),
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1723,7 +1727,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return -ENOMEM;
}
- ret = i915_gem_object_pin(obj, 0, true, false);
+ ret = i915_gem_ggtt_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
@@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_offset(obj) +
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -309,7 +309,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe),
- i915_gem_obj_offset(obj) + sprsurf_offset);
+ i915_gem_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
/* potentially re-enable LP watermarks */
@@ -478,7 +478,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe),
- i915_gem_obj_offset(obj) + dvssurf_offset);
+ i915_gem_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
Plumb the functions we care about with VM arguments. With the exception of the hack in i915_ppgtt_bind to only ever be able to do aliasing PPGTT, this does most everything we want. Signed-off-by: Ben Widawsky <ben@bwidawsk.net> --- drivers/gpu/drm/i915/i915_debugfs.c | 54 +++-- drivers/gpu/drm/i915/i915_dma.c | 6 +- drivers/gpu/drm/i915/i915_drv.h | 105 +++++---- drivers/gpu/drm/i915/i915_gem.c | 344 ++++++++++++++++++++--------- drivers/gpu/drm/i915/i915_gem_context.c | 10 +- drivers/gpu/drm/i915/i915_gem_evict.c | 57 ++--- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 78 ++++--- drivers/gpu/drm/i915/i915_gem_gtt.c | 107 +++++---- drivers/gpu/drm/i915/i915_gem_stolen.c | 13 +- drivers/gpu/drm/i915/i915_gem_tiling.c | 16 +- drivers/gpu/drm/i915/i915_irq.c | 35 +-- drivers/gpu/drm/i915/i915_trace.h | 20 +- drivers/gpu/drm/i915/intel_display.c | 22 +- drivers/gpu/drm/i915/intel_fb.c | 6 +- drivers/gpu/drm/i915/intel_overlay.c | 16 +- drivers/gpu/drm/i915/intel_pm.c | 8 +- drivers/gpu/drm/i915/intel_ringbuffer.c | 28 ++- drivers/gpu/drm/i915/intel_sprite.c | 6 +- 18 files changed, 569 insertions(+), 362 deletions(-)