@@ -1183,6 +1183,8 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
+i915_gem_get_unfenced_gtt_size(struct drm_i915_gem_object *obj);
+uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
/* i915_gem_gtt.c */
@@ -1427,6 +1427,35 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
}
/**
+ * i915_gem_get_unfenced_gtt_size - return required GTT size for an
+ * unfenced object
+ * @obj: object to check
+ *
+ * Return the required GTT size for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+uint32_t
+i915_gem_get_unfenced_gtt_size(struct drm_i915_gem_object *obj)
+{
+ u32 unfenced_alignment;
+
+ /*
+ * Current userspace will attempt to overallocate a bo so that it
+ * can be reused with another surface and so its size is unlikely
+ * to be an exact number of tile rows - but it promised never to
+ * access beyond the end of the last complete row.
+ *
+ * gen2 has a further restriction, in that it requires an even number
+ * of tile rows. Userspace was not aware of this until recently
+ * and so violated its promise to always allocate enough pages
+ * for the hardware. In reply, we now always round up the GTT
+ * allocation to the next [even] tile row.
+ */
+ unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+ return ALIGN(obj->base.size, unfenced_alignment);
+}
+
+/**
* i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
* unfenced object
* @obj: object to check
@@ -2744,7 +2773,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *free_space;
gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
- u32 size, fence_size, fence_alignment, unfenced_alignment;
+ u32 size, fence_size, fence_alignment;
+ u32 unfenced_size, unfenced_alignment;
bool mappable, fenceable;
int ret;
@@ -2755,6 +2785,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
fence_size = i915_gem_get_gtt_size(obj);
fence_alignment = i915_gem_get_gtt_alignment(obj);
+ unfenced_size = i915_gem_get_unfenced_gtt_size(obj);
unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
if (alignment == 0)
@@ -2765,12 +2796,12 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -EINVAL;
}
- size = map_and_fenceable ? fence_size : obj->base.size;
+ size = map_and_fenceable ? fence_size : unfenced_size;
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->base.size >
+ if (size >
(map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
@@ -349,7 +349,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
i915_gem_get_unfenced_gtt_alignment(obj);
- if (obj->gtt_offset & (unfenced_alignment - 1))
+ u32 unfenced_size =
+ i915_gem_get_unfenced_gtt_size(obj);
+ if (obj->gtt_space->size < unfenced_size ||
+ obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}