@@ -125,8 +125,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (drm_mm_node_allocated(&obj->gtt_space))
- seq_printf(m, " (gtt offset: %08x, size: %08x)",
- obj->gtt_offset, (unsigned int)obj->gtt_space.size);
+ seq_printf(m, " (gtt offset: %08lx, size: %08x)",
+ obj->gtt_space.start, (unsigned int)obj->gtt_space.size);
if (obj->pin_mappable || obj->fault_mappable)
seq_printf(m, " (mappable)");
if (obj->ring != NULL)
@@ -253,12 +253,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+ obj->gtt_space.start);
}
if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+ obj->gtt_space.start);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -472,7 +474,7 @@ static void i915_dump_object(struct seq_file *m,
page_count = obj->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
u32 *mem = io_mapping_map_wc(mapping,
- obj->gtt_offset + page * PAGE_SIZE);
+ obj->gtt_space.start + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
io_mapping_unmap(mem);
@@ -493,7 +495,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "--- gtt_offset = 0x%08lx\n",
+ obj->gtt_space.start);
i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
}
}
@@ -683,7 +686,8 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->batchbuffer[i]) {
struct drm_i915_error_object *obj = error->batchbuffer[i];
- seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "--- gtt_offset = 0x%08x\n",
+ obj->gtt_offset);
offset = 0;
for (page = 0; page < obj->page_count; page++) {
for (elt = 0; elt < PAGE_SIZE/4; elt++) {
@@ -795,13 +795,6 @@ struct drm_i915_gem_object {
struct scatterlist *sg_list;
int num_sg;
- /**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
- */
- uint32_t gtt_offset;
-
/* Which ring is refering to is this object */
struct intel_ring_buffer *ring;
@@ -88,10 +88,11 @@ static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
{
dev_priv->mm.gtt_count++;
dev_priv->mm.gtt_memory += obj->gtt_space.size;
- if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
+ if (obj->gtt_space.start < dev_priv->mm.gtt_mappable_end) {
dev_priv->mm.mappable_gtt_used +=
min_t(size_t, obj->gtt_space.size,
- dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
+ dev_priv->mm.gtt_mappable_end
+ - obj->gtt_space.start);
}
list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
}
@@ -101,10 +102,11 @@ static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
{
dev_priv->mm.gtt_count--;
dev_priv->mm.gtt_memory -= obj->gtt_space.size;
- if (obj->gtt_offset < dev_priv->mm.gtt_mappable_end) {
+ if (obj->gtt_space.start < dev_priv->mm.gtt_mappable_end) {
dev_priv->mm.mappable_gtt_used -=
min_t(size_t, obj->gtt_space.size,
- dev_priv->mm.gtt_mappable_end - obj->gtt_offset);
+ dev_priv->mm.gtt_mappable_end
+ - obj->gtt_space.start);
}
list_del_init(&obj->gtt_list);
}
@@ -691,7 +693,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- offset = obj->gtt_offset + args->offset;
+ offset = obj->gtt_space.start + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -776,7 +778,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
if (ret)
goto out_unpin_pages;
- offset = obj->gtt_offset + args->offset;
+ offset = obj->gtt_space.start + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -1317,7 +1319,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
- pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev->agp->base + obj->gtt_space.start) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -2237,7 +2239,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
obj->map_and_fenceable = true;
drm_mm_remove_node(&obj->gtt_space);
- obj->gtt_offset = 0;
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
@@ -2296,9 +2297,9 @@ static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj)
int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ val = (uint64_t)((obj->gtt_space.start + size - 4096) &
0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
+ val |= obj->gtt_space.start & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT;
@@ -2317,9 +2318,9 @@ static void i965_write_fence_reg(struct drm_i915_gem_object *obj)
int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ val = (uint64_t)((obj->gtt_space.start + size - 4096) &
0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
+ val |= obj->gtt_space.start & 0xfffff000;
val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2336,10 +2337,10 @@ static void i915_write_fence_reg(struct drm_i915_gem_object *obj)
uint32_t fence_reg, val, pitch_val;
int tile_width;
- if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
- (obj->gtt_offset & (size - 1))) {
- WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
- __func__, obj->gtt_offset, obj->map_and_fenceable, size,
+ if ((obj->gtt_space.start & ~I915_FENCE_START_MASK) ||
+ (obj->gtt_space.start & (size - 1))) {
+ WARN(1, "%s: object 0x%08lx [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
+ __func__, obj->gtt_space.start, obj->map_and_fenceable, size,
obj->gtt_space.start, obj->gtt_space.size);
return;
}
@@ -2360,7 +2361,7 @@ static void i915_write_fence_reg(struct drm_i915_gem_object *obj)
else
WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
- val = obj->gtt_offset;
+ val = obj->gtt_space.start;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
@@ -2385,10 +2386,10 @@ static void i830_write_fence_reg(struct drm_i915_gem_object *obj)
uint32_t pitch_val;
uint32_t fence_size_bits;
- if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
- (obj->gtt_offset & (obj->base.size - 1))) {
- WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
- __func__, obj->gtt_offset);
+ if ((obj->gtt_space.start & ~I830_FENCE_START_MASK) ||
+ (obj->gtt_space.start & (obj->base.size - 1))) {
+ WARN(1, "%s: object 0x%08lx not 512K or size aligned\n",
+ __func__, obj->gtt_space.start);
return;
}
@@ -2396,7 +2397,7 @@ static void i830_write_fence_reg(struct drm_i915_gem_object *obj)
pitch_val = ffs(pitch_val) - 1;
WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- val = obj->gtt_offset;
+ val = obj->gtt_space.start;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
fence_size_bits = I830_FENCE_SIZE_BITS(size);
@@ -2496,15 +2497,15 @@ i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
if (!obj->stride)
return -EINVAL;
WARN((obj->stride & (512 - 1)),
- "object 0x%08x is X tiled but has non-512B pitch\n",
- obj->gtt_offset);
+ "object 0x%08lx is X tiled but has non-512B pitch\n",
+ obj->gtt_space.start);
break;
case I915_TILING_Y:
if (!obj->stride)
return -EINVAL;
WARN((obj->stride & (128 - 1)),
- "object 0x%08x is Y tiled but has non-128B pitch\n",
- obj->gtt_offset);
+ "object 0x%08lx is Y tiled but has non-128B pitch\n",
+ obj->gtt_space.start);
break;
}
@@ -2735,8 +2736,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
goto search_free;
}
- obj->gtt_offset = obj->gtt_space.start;
-
/* keep track of bounds object by adding it to the inactive list */
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
i915_gem_info_add_gtt(dev_priv, obj);
@@ -2748,14 +2747,15 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
+ trace_i915_gem_object_bind(obj, obj->gtt_space.start, map_and_fenceable);
fenceable =
obj->gtt_space.size == fence_size &&
(obj->gtt_space.start & (fence_alignment -1)) == 0;
mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+ obj->gtt_space.start + obj->base.size
+ <= dev_priv->mm.gtt_mappable_end;
obj->map_and_fenceable = mappable && fenceable;
@@ -3294,7 +3294,7 @@ i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
target_handle = reloc.target_handle;
}
- target_offset = to_intel_bo(target_obj)->gtt_offset;
+ target_offset = to_intel_bo(target_obj)->gtt_space.start;
#if WATCH_RELOC
DRM_INFO("%s: obj %p offset %08x target %d "
@@ -3412,7 +3412,7 @@ i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
break;
/* Map the page containing the relocation we're going to perform. */
- reloc.offset += obj->gtt_offset;
+ reloc.offset += obj->gtt_space.start;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc.offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
@@ -3487,7 +3487,7 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
dev_priv->fence_regs[obj->fence_reg].gpu = true;
}
- entry->offset = obj->gtt_offset;
+ entry->offset = obj->gtt_space.start;
}
while (i--)
@@ -3803,7 +3803,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
/* Sanity check the batch buffer */
- exec_offset = batch_obj->gtt_offset;
+ exec_offset = batch_obj->gtt_space.start;
ret = i915_gem_check_execbuffer(args, exec_offset);
if (ret != 0) {
DRM_ERROR("execbuf with invalid offset/length\n");
@@ -4072,13 +4072,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
WARN_ON(i915_verify_lists(dev));
if (drm_mm_node_allocated(&obj->gtt_space)) {
- if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ if ((alignment && obj->gtt_space.start & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- obj->gtt_offset, alignment,
+ obj->gtt_space.start, alignment,
map_and_fenceable,
obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
@@ -4168,7 +4168,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj->gtt_offset;
+ args->offset = obj->gtt_space.start;
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -4468,7 +4468,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
if (ret)
goto err_unref;
- dev_priv->seqno_gfx_addr = obj->gtt_offset;
+ dev_priv->seqno_gfx_addr = obj->gtt_space.start;
dev_priv->seqno_page = kmap(obj->pages[0]);
if (dev_priv->seqno_page == NULL)
goto err_unpin;
@@ -157,7 +157,7 @@ i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
{
int page;
- DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
+ DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_space.start);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
@@ -171,7 +171,7 @@ i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
chunk_len = 128;
i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
- obj->gtt_offset +
+ obj->gtt_space.start +
page * PAGE_SIZE,
mark);
}
@@ -190,10 +190,10 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
- __func__, obj, obj->gtt_offset, handle,
+ __func__, obj, obj->gtt_space.start, handle,
obj->size / 1024);
- gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
+ gtt_mapping = ioremap(dev->agp->base + obj->gtt_space.start, obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
@@ -217,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
- (int)(obj->gtt_offset +
+ (int)(obj->gtt_space.start +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
@@ -256,14 +256,14 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
while (size < obj->base.size)
size <<= 1;
- if (obj->gtt_offset & (size - 1))
+ if (obj->gtt_space.start & (size - 1))
return false;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
- if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+ if (obj->gtt_space.start & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+ if (obj->gtt_space.start & ~I830_FENCE_START_MASK)
return false;
}
@@ -439,7 +439,7 @@ i915_error_object_create(struct drm_device *dev,
if (dst == NULL)
return NULL;
- reloc_offset = src->gtt_offset;
+ reloc_offset = src->gtt_space.start;
for (page = 0; page < page_count; page++) {
unsigned long flags;
void __iomem *s;
@@ -461,7 +461,7 @@ i915_error_object_create(struct drm_device *dev,
reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
- dst->gtt_offset = src->gtt_offset;
+ dst->gtt_offset = src->gtt_space.start;
return dst;
@@ -631,13 +631,13 @@ static void i915_capture_error_state(struct drm_device *dev)
count = 0;
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj->gtt_offset &&
- bbaddr < obj->gtt_offset + obj->base.size)
+ bbaddr >= obj->gtt_space.start &&
+ bbaddr < obj->gtt_space.start + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj->gtt_offset &&
- error->acthd < obj->gtt_offset + obj->base.size)
+ error->acthd >= obj->gtt_space.start &&
+ error->acthd < obj->gtt_space.start + obj->base.size)
batchbuffer[1] = obj;
count++;
@@ -646,13 +646,13 @@ static void i915_capture_error_state(struct drm_device *dev)
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj->gtt_offset &&
- bbaddr < obj->gtt_offset + obj->base.size)
+ bbaddr >= obj->gtt_space.start &&
+ bbaddr < obj->gtt_space.start + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj->gtt_offset &&
- error->acthd < obj->gtt_offset + obj->base.size)
+ error->acthd >= obj->gtt_space.start &&
+ error->acthd < obj->gtt_space.start + obj->base.size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
@@ -662,13 +662,13 @@ static void i915_capture_error_state(struct drm_device *dev)
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj->gtt_offset &&
- bbaddr < obj->gtt_offset + obj->base.size)
+ bbaddr >= obj->gtt_space.start &&
+ bbaddr < obj->gtt_space.start + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj->gtt_offset &&
- error->acthd < obj->gtt_offset + obj->base.size)
+ error->acthd >= obj->gtt_space.start &&
+ error->acthd < obj->gtt_space.start + obj->base.size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
@@ -703,7 +703,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error->active_bo[i].size = obj->base.size;
error->active_bo[i].name = obj->base.name;
error->active_bo[i].seqno = obj->last_rendering_seqno;
- error->active_bo[i].gtt_offset = obj->gtt_offset;
+ error->active_bo[i].gtt_offset = obj->gtt_space.start;
error->active_bo[i].read_domains = obj->base.read_domains;
error->active_bo[i].write_domain = obj->base.write_domain;
error->active_bo[i].fence_reg = obj->fence_reg;
@@ -929,10 +929,10 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
- stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+ stall_detected = I915_READ(dspsurf) == obj->gtt_space.start;
} else {
int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
- stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (obj->gtt_space.start +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -1232,7 +1232,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_offset == obj->gtt_offset &&
+ dev_priv->cfb_offset == obj->gtt_space.start &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1244,7 +1244,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_offset = obj->gtt_offset;
+ dev_priv->cfb_offset = obj->gtt_space.start;
dev_priv->cfb_y = crtc->y;
dpfc_ctl &= DPFC_RESERVED;
@@ -1260,7 +1260,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_space.start | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -1549,7 +1549,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
- Start = obj->gtt_offset;
+ Start = obj->gtt_space.start;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -4371,7 +4371,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin;
}
- addr = obj->gtt_offset;
+ addr = obj->gtt_space.start;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
@@ -5135,7 +5135,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(obj->gtt_space.start + offset);
OUT_RING(MI_NOOP);
break;
@@ -5143,7 +5143,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(obj->gtt_space.start + offset);
OUT_RING(MI_NOOP);
break;
@@ -5156,7 +5156,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
+ OUT_RING(obj->gtt_space.start | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -5171,7 +5171,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
+ OUT_RING(obj->gtt_space.start);
pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5866,7 +5866,7 @@ void intel_init_clock_gating(struct drm_device *dev)
struct drm_i915_gem_object *obj = dev_priv->renderctx;
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_SET_CONTEXT);
- OUT_RING(obj->gtt_offset |
+ OUT_RING(obj->gtt_space.start |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -5885,7 +5885,7 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->pwrctx = intel_alloc_context_page(dev);
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj = dev_priv->pwrctx;
- I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, obj->gtt_space.start | PWRCTX_EN);
I915_WRITE(MCHBAR_RENDER_STANDBY,
I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
}
@@ -6162,7 +6162,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->renderctx) {
struct drm_i915_gem_object *obj = dev_priv->renderctx;
- I915_WRITE(CCID, obj->gtt_offset &~ CCID_EN);
+ I915_WRITE(CCID, obj->gtt_space.start & ~CCID_EN);
POSTING_READ(CCID);
i915_gem_object_unpin(obj);
@@ -6173,7 +6173,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj = dev_priv->pwrctx;
- I915_WRITE(PWRCTXA, obj->gtt_offset &~ PWRCTX_EN);
+ I915_WRITE(PWRCTXA, obj->gtt_space.start & ~PWRCTX_EN);
POSTING_READ(PWRCTXA);
i915_gem_object_unpin(obj);
@@ -132,10 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_space.start;
info->fix.smem_len = size;
- info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
+ info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_space.start, size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
@@ -165,7 +165,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
- obj->gtt_offset, obj);
+ obj->gtt_space.start, obj);
mutex_unlock(&dev->struct_mutex);
@@ -199,7 +199,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
regs = overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
- overlay->reg_bo->gtt_offset);
+ overlay->reg_bo->gtt_space.start);
return regs;
}
@@ -823,7 +823,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
+ regs->OBUF_0Y = new_bo->gtt_space.start + params->offset_Y;
regs->OSTRIDE = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -837,8 +837,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
- regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
+ regs->OBUF_0U = new_bo->gtt_space.start + params->offset_U;
+ regs->OBUF_0V = new_bo->gtt_space.start + params->offset_V;
regs->OSTRIDE |= params->stride_UV << 16;
}
@@ -1428,7 +1428,7 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = reg_bo->gtt_offset;
+ overlay->flip_addr = reg_bo->gtt_space.start;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1502,7 +1502,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- overlay->reg_bo->gtt_offset);
+ overlay->reg_bo->gtt_space.start);
return regs;
}
@@ -1535,7 +1535,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (long) overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = (long) overlay->reg_bo->gtt_offset;
+ error->base = (long) overlay->reg_bo->gtt_space.start;
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
@@ -148,7 +148,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->write_tail(ring, 0);
/* Initialize the ring. */
- I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_START(ring, obj->gtt_space.start);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@@ -178,7 +178,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
/* If the head is still not zero, the ring is dead */
if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
- I915_READ_START(ring) != obj->gtt_offset ||
+ I915_READ_START(ring) != obj->gtt_space.start ||
(I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
if (IS_GEN6(ring->dev) && ring->dev->pdev->revision <= 8) {
/* Early revisions of Sandybridge do not like
@@ -564,7 +564,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
goto err_unref;
}
- ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.gfx_addr = obj->gtt_space.start;
ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -618,7 +618,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
goto err_unref;
ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj->gtt_offset;
+ ring->map.offset = dev->agp->base + obj->gtt_space.start;
ring->map.type = 0;
ring->map.flags = 0;
ring->map.mtrr = 0;
@@ -949,7 +949,7 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
return ret;
intel_ring_emit(ring, MI_BATCH_BUFFER_START);
- intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+ intel_ring_emit(ring, to_blt_workaround(ring)->gtt_space.start);
return 0;
} else