@@ -994,7 +994,8 @@ int i915_gem_init_object(struct drm_gem_object *obj);
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
+int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
+ bool mappable);
void i915_gem_object_unpin(struct drm_gem_object *obj);
int i915_gem_object_unbind(struct drm_gem_object *obj);
void i915_gem_release_mmap(struct drm_gem_object *obj);
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -51,7 +51,7 @@ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *o
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
- unsigned alignment);
+ unsigned alignment, bool mappable);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -591,7 +591,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
+ ret = i915_gem_object_pin(obj, 0, true);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -686,7 +686,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(obj, 0);
+ ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out_unlock;
@@ -1173,7 +1173,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
@@ -1425,7 +1425,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
* initial fault faster and any subsequent flushing possible).
*/
if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -2517,7 +2517,8 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
* Finds free space in the GTT aperture and binds the object there.
*/
static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment,
+ bool mappable)
{
struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2547,11 +2548,29 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
}
search_free:
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- obj->size, alignment, 0);
+ if (mappable)
+ free_space =
+ drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ obj->size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ obj->size, alignment, 0);
+
if (free_space != NULL) {
- obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
- alignment);
+ if (mappable)
+ obj_priv->gtt_space =
+ drm_mm_get_block_range_generic(free_space,
+ obj->size,
+ alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ obj_priv->gtt_space =
+ drm_mm_get_block(free_space, obj->size,
+ alignment);
+
if (obj_priv->gtt_space != NULL)
obj_priv->gtt_offset = obj_priv->gtt_space->start;
}
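A note on the two new branches above: with mappable set, both the free-space search and the block allocation are limited to the range [0, dev_priv->mm.gtt_mappable_end), i.e. the slice of the GTT that the CPU can reach through the aperture; with it clear, the whole GTT is eligible. A minimal userspace sketch of that range decision follows (the struct, names and sizes are illustrative only, not taken from the driver):

/* Illustrative model, not driver code: pick the window a binding may use. */
#include <stdbool.h>
#include <stdio.h>

struct gtt_window {
	unsigned long start;	/* inclusive */
	unsigned long end;	/* exclusive */
};

static struct gtt_window bind_window(bool mappable,
				     unsigned long gtt_mappable_end,
				     unsigned long gtt_total)
{
	struct gtt_window win;

	win.start = 0;
	/* Mappable objects must stay below the aperture limit so the CPU
	 * can reach them through the GTT; anything else may use it all. */
	win.end = mappable ? gtt_mappable_end : gtt_total;
	return win;
}

int main(void)
{
	/* Example numbers only: a 256 MiB aperture inside a 512 MiB GTT. */
	struct gtt_window win = bind_window(true, 256UL << 20, 512UL << 20);

	printf("search window: [%#lx, %#lx)\n", win.start, win.end);
	return 0;
}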
@@ -2562,7 +2581,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
#if WATCH_LRU
DRM_INFO("%s: GTT full, evicting something\n", __func__);
#endif
- ret = i915_gem_evict_something(dev, obj->size, alignment, true);
+ ret = i915_gem_evict_something(dev, obj->size, alignment,
+ mappable);
if (ret)
return ret;
@@ -2581,7 +2601,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
ret = i915_gem_evict_something(dev, obj->size,
- alignment, true);
+ alignment, mappable);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2611,7 +2631,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
drm_mm_put_block(obj_priv->gtt_space);
obj_priv->gtt_space = NULL;
- ret = i915_gem_evict_something(dev, obj->size, alignment, true);
+ ret = i915_gem_evict_something(dev, obj->size, alignment,
+ mappable);
if (ret)
return ret;
@@ -3190,7 +3211,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
/* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment, false);
if (ret)
return ret;
@@ -4031,7 +4052,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
}
int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
+ bool mappable)
{
struct drm_device *dev = obj->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4057,7 +4079,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
}
if (obj_priv->gtt_space == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment);
+ ret = i915_gem_object_bind_to_gtt(obj, alignment, mappable);
if (ret)
return ret;
}
@@ -4143,7 +4165,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
obj_priv->user_pin_count++;
obj_priv->pin_filp = file_priv;
if (obj_priv->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment);
+ ret = i915_gem_object_pin(obj, args->alignment, true);
if (ret != 0) {
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
@@ -4452,7 +4474,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
- ret = i915_gem_object_pin(obj, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
goto err_unref;
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1446,7 +1446,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG();
}
- ret = i915_gem_object_pin(obj, alignment);
+ ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
return ret;
@@ -4326,7 +4326,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
- ret = i915_gem_object_pin(bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(bo, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
@@ -5489,7 +5489,7 @@ intel_alloc_context_page(struct drm_device *dev)
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(ctx, 4096);
+ ret = i915_gem_object_pin(ctx, 4096, true);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -753,7 +753,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
if (ret != 0)
return ret;
@@ -1395,7 +1395,7 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -556,7 +556,7 @@ static int init_status_page(struct drm_device *dev,
obj_priv = to_intel_bo(obj);
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
- ret = i915_gem_object_pin(obj, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
goto err_unref;
}
@@ -609,7 +609,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
ring->gem_object = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret)
goto err_unref;