@@ -153,6 +153,20 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static bool
+i915_gem_object_cpu_accessible(struct drm_gem_object *obj) /* true iff obj is NOT bound beyond the CPU-mappable GTT aperture */
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); /* driver-private state (gtt_offset/gtt_space) */
+
+ if (obj_priv->gtt_offset + obj->size > dev_priv->mm.gtt_mappable_end /* object's tail lies past the mappable boundary... */
+ && obj_priv->gtt_space) /* ...and it is currently bound into the GTT */
+ return false;
+
+ return true; /* unbound counts as accessible: it can still be bound inside the mappable range */
+}
+
static inline int
fast_shmem_read(struct page **pages,
loff_t page_base, int page_offset,
@@ -1172,6 +1186,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
+ if (!i915_gem_object_cpu_accessible(obj))
+ i915_gem_object_unbind(obj);
+
if (!obj_priv->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
@@ -3197,6 +3214,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
int i, ret;
void __iomem *reloc_page;
bool need_fence;
+ bool need_mappable = entry->relocation_count ? true : false;
need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj_priv->tiling_mode != I915_TILING_NONE;
@@ -3211,7 +3229,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
/* Choose the GTT offset for our buffer and put it there. */
- ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment, false);
+ ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment,
+ need_mappable);
if (ret)
return ret;
@@ -4067,7 +4086,8 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
if (obj_priv->gtt_space != NULL) {
if (alignment == 0)
alignment = i915_gem_get_gtt_alignment(obj);
- if (obj_priv->gtt_offset & (alignment - 1)) {
+ if (obj_priv->gtt_offset & (alignment - 1)
+ || (mappable && !i915_gem_object_cpu_accessible(obj))) {
WARN(obj_priv->pin_count,
"bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x\n",