@@ -524,6 +524,8 @@ typedef struct drm_i915_private {
struct drm_mm vram;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
+ /** End of mappable part of GTT */
+ unsigned long gtt_mappable_end;
struct io_mapping *gtt_mapping;
int gtt_mtrr;
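
The new gtt_mappable_end field records where the CPU-visible part of the
GTT ends: on chips whose GTT is larger than the aperture backing
gtt_mapping, only offsets below this limit are reachable by the CPU. A
minimal sketch of the invariant, using a hypothetical helper name that is
not part of this patch:

	/* Hypothetical helper (not in this patch): a placement is
	 * CPU-mappable only if it lies entirely below gtt_mappable_end. */
	static bool
	i915_gtt_range_is_mappable(drm_i915_private_t *dev_priv,
				   unsigned long offset, unsigned long size)
	{
		return offset + size <= dev_priv->mm.gtt_mappable_end;
	}
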
@@ -1052,7 +1054,8 @@ void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable);
int i915_gem_evict_everything(struct drm_device *dev);
int i915_gem_evict_inactive(struct drm_device *dev);
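
The extra parameter selects between two eviction flavours: false frees up
space anywhere in the GTT, true only counts space the CPU can reach
through the aperture. Illustrative calls, with made-up size and alignment
values:

	/* Any GTT space will do, e.g. when freeing memory for backing
	 * pages: */
	ret = i915_gem_evict_something(dev, 4096, 4096, false);

	/* Only space below dev_priv->mm.gtt_mappable_end qualifies: */
	ret = i915_gem_evict_something(dev, 4096, 4096, true);
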
@@ -83,6 +83,8 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
drm_mm_init(&dev_priv->mm.gtt_space, start,
end - start);
+ dev_priv->mm.gtt_mappable_end = end;
+
dev->gtt_total = (uint32_t) (end - start);
return 0;
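
i915_gem_do_init() keeps treating the whole managed range as mappable:
end doubles as the mappable limit until a caller distinguishes the two.
An illustrative call with a made-up aperture size:

	/* Manage [start, end) = [0, 256MiB); for now all of it is
	 * considered mappable. */
	ret = i915_gem_do_init(dev, 0, 256 * 1024 * 1024);
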
@@ -328,7 +330,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
ret = i915_gem_evict_something(dev, obj->size,
- i915_gem_get_gtt_alignment(obj));
+ i915_gem_get_gtt_alignment(obj),
+ false);
if (ret)
return ret;
@@ -2559,7 +2562,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, obj->size, alignment, true);
if (ret)
return ret;
@@ -2578,7 +2581,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
ret = i915_gem_evict_something(dev, obj->size,
- alignment);
+ alignment, true);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2608,7 +2611,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, obj->size, alignment, true);
if (ret)
return ret;
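
All three call sites in the bind path pass mappable = true: space found
for a fresh binding must lie in the CPU-visible range. The surrounding
control flow condenses to roughly the following sketch (error paths and
the shrinker fallback omitted):

	/* Condensed sketch of the bind path's evict-and-retry loop: */
	for (;;) {
		free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
						obj->size, alignment, 0);
		if (free_space != NULL)
			break;			/* found a hole, bind */
		ret = i915_gem_evict_something(dev, obj->size,
					       alignment, true);
		if (ret)
			return ret;		/* nothing left to evict */
	}
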
@@ -89,7 +89,8 @@ mark_free(struct drm_i915_gem_object *obj_priv,
while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
@@ -100,9 +101,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
+ if (mappable) {
+ if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0))
+ return 0;
+ } else {
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+ }
/*
* The goal is to evict objects and amalgamate space in LRU order.
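
The re-check for free space after retiring requests now has to honour the
mappable limit, hence the search_free/search_free_in_range split above. A
hypothetical wrapper (not in this patch) makes the symmetry explicit:

	/* Hypothetical wrapper collapsing the two branches above: */
	static struct drm_mm_node *
	i915_gtt_search_free(drm_i915_private_t *dev_priv, int min_size,
			     unsigned alignment, bool mappable)
	{
		if (mappable)
			/* only holes below the mappable limit qualify */
			return drm_mm_search_free_in_range(
					&dev_priv->mm.gtt_space,
					min_size, alignment, 0,
					dev_priv->mm.gtt_mappable_end, 0);
		return drm_mm_search_free(&dev_priv->mm.gtt_space,
					  min_size, alignment, 0);
	}
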
@@ -128,7 +137,12 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
*/
INIT_LIST_HEAD(&unwind_list);
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ if (mappable)
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+ alignment, 0,
+ dev_priv->mm.gtt_mappable_end);
+ else
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
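
The eviction scan gets the same treatment: drm_mm_init_scan_with_range()
primes a scan that only reports holes inside [0, gtt_mappable_end), and
candidate objects are then fed to it in LRU order. A condensed sketch of
the scan protocol as used here, assuming drm_mm_scan_add_block() returns
nonzero once enough contiguous space has been amalgamated:

	drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
				    alignment, 0,
				    dev_priv->mm.gtt_mappable_end);
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		if (drm_mm_scan_add_block(obj_priv->gtt_space))
			goto found;	/* hole large enough, unwind */
	}
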