Message ID | 1372375867-1003-31-git-send-email-ben@bwidawsk.net (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Thu, Jun 27, 2013 at 04:30:31PM -0700, Ben Widawsky wrote: > This will be handy when we add VMs. It's not strictly, necessary, but it > will make the code much cleaner. > > Signed-off-by: Ben Widawsky <ben@bwidawsk.net> You're going to hate me, but this is patch ordering fail. Imo this should be one of the very first patches, at least before you kill obj->gtt_offset. To increase your hatred some more, I have bikesheds on the names, too. I think the best would be to respin this patch and merge it right away. It'll cause tons of conflicts. But keeping it as no. 30 in this series will be even worse, since merging the first 30 patches won't happen instantly. So much more potential for rebase hell imo. The MO for when you stumble over such a giant renaming operation should be imo to submit the "add inline abstraction functions" patch(es) right away. That way everyone else who potentially works in the same area also gets a heads up. > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > index bc80ce0..56d47bc 100644 > --- a/drivers/gpu/drm/i915/i915_drv.h > +++ b/drivers/gpu/drm/i915/i915_drv.h > @@ -1349,6 +1349,27 @@ struct drm_i915_gem_object { > > #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) > > +static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o) > +{ > + return o->gtt_space->start; > +} To differentiate from the ppgtt offset I'd call this i915_gem_obj_ggtt_offset. > + > +static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o) > +{ > + return o->gtt_space != NULL; > +} Same here, I think we want ggtt inserted. > + > +static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o) > +{ > + return o->gtt_space->size; > +} This is even more misleading and the real reason I vote for all the ggtt bikesheds: ggtt_size != obj->size is very much possible (on gen2/3 only though). We use that to satisfy alignment/size constraints on tiled objects. 
So the i915_gem_obj_ggtt_size rename is mandatory here. > + > +static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o, > + enum i915_cache_level color) > +{ > + o->gtt_space->color = color; > +} Ditto for consistency. Cheers, Daniel > + > /** > * Request queue structure. > * > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index d747a1f..dd2228d 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) > static inline bool > i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) > { > - return obj->gtt_space && !obj->active; > + return i915_gem_obj_bound(obj) && !obj->active; > } > > int > @@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, > mutex_lock(&dev->struct_mutex); > list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) > if (obj->pin_count) > - pinned += obj->gtt_space->size; > + pinned += i915_gem_obj_size(obj); > mutex_unlock(&dev->struct_mutex); > > args->aper_size = i915_gtt_vm->total; > @@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev, > * anyway again before the next pread happens. */ > if (obj->cache_level == I915_CACHE_NONE) > needs_clflush = 1; > - if (obj->gtt_space) { > + if (i915_gem_obj_bound(obj)) { > ret = i915_gem_object_set_to_gtt_domain(obj, false); > if (ret) > return ret; > @@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, > user_data = to_user_ptr(args->data_ptr); > remain = args->size; > > - offset = obj->gtt_space->start + args->offset; > + offset = i915_gem_obj_offset(obj) + args->offset; > > while (remain > 0) { > /* Operation in this page > @@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, > * right away and we therefore have to clflush anyway. 
*/ > if (obj->cache_level == I915_CACHE_NONE) > needs_clflush_after = 1; > - if (obj->gtt_space) { > + if (i915_gem_obj_bound(obj)) { > ret = i915_gem_object_set_to_gtt_domain(obj, true); > if (ret) > return ret; > @@ -1361,7 +1361,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) > > obj->fault_mappable = true; > > - pfn += (obj->gtt_space->start >> PAGE_SHIFT) + page_offset; > + pfn += (i915_gem_obj_offset(obj) >> PAGE_SHIFT) + page_offset; > > /* Finally, remap it using the new GTT offset */ > ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); > @@ -1667,7 +1667,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) > if (obj->pages == NULL) > return 0; > > - BUG_ON(obj->gtt_space); > + BUG_ON(i915_gem_obj_bound(obj)); > > if (obj->pages_pin_count) > return -EBUSY; > @@ -2587,7 +2587,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) > drm_i915_private_t *dev_priv = obj->base.dev->dev_private; > int ret; > > - if (obj->gtt_space == NULL) > + if (!i915_gem_obj_bound(obj)) > return 0; > > if (obj->pin_count) > @@ -2669,11 +2669,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, > } > > if (obj) { > - u32 size = obj->gtt_space->size; > + u32 size = i915_gem_obj_size(obj); > > - val = (uint64_t)((obj->gtt_space->start + size - 4096) & > + val = (uint64_t)((i915_gem_obj_offset(obj) + size - 4096) & > 0xfffff000) << 32; > - val |= obj->gtt_space->start & 0xfffff000; > + val |= i915_gem_obj_offset(obj) & 0xfffff000; > val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; > if (obj->tiling_mode == I915_TILING_Y) > val |= 1 << I965_FENCE_TILING_Y_SHIFT; > @@ -2693,15 +2693,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, > u32 val; > > if (obj) { > - u32 size = obj->gtt_space->size; > + u32 size = i915_gem_obj_size(obj); > int pitch_val; > int tile_width; > > - WARN((obj->gtt_space->start & ~I915_FENCE_START_MASK) || > + WARN((i915_gem_obj_offset(obj) & 
~I915_FENCE_START_MASK) || > (size & -size) != size || > - (obj->gtt_space->start & (size - 1)), > + (i915_gem_obj_offset(obj) & (size - 1)), > "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", > - obj->gtt_space->start, obj->map_and_fenceable, size); > + i915_gem_obj_offset(obj), obj->map_and_fenceable, size); > > if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) > tile_width = 128; > @@ -2712,7 +2712,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, > pitch_val = obj->stride / tile_width; > pitch_val = ffs(pitch_val) - 1; > > - val = obj->gtt_space->start; > + val = i915_gem_obj_offset(obj); > if (obj->tiling_mode == I915_TILING_Y) > val |= 1 << I830_FENCE_TILING_Y_SHIFT; > val |= I915_FENCE_SIZE_BITS(size); > @@ -2737,19 +2737,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg, > uint32_t val; > > if (obj) { > - u32 size = obj->gtt_space->size; > + u32 size = i915_gem_obj_size(obj); > uint32_t pitch_val; > > - WARN((obj->gtt_space->start & ~I830_FENCE_START_MASK) || > + WARN((i915_gem_obj_offset(obj) & ~I830_FENCE_START_MASK) || > (size & -size) != size || > - (obj->gtt_space->start & (size - 1)), > + (i915_gem_obj_offset(obj) & (size - 1)), > "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", > - obj->gtt_space->start, size); > + i915_gem_obj_offset(obj), size); > > pitch_val = obj->stride / 128; > pitch_val = ffs(pitch_val) - 1; > > - val = obj->gtt_space->start; > + val = i915_gem_obj_offset(obj); > if (obj->tiling_mode == I915_TILING_Y) > val |= 1 << I830_FENCE_TILING_Y_SHIFT; > val |= I830_FENCE_SIZE_BITS(size); > @@ -3030,6 +3030,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev) > int err = 0; > > list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) { > + unsigned long obj_offset = i915_gem_obj_offset(obj); > if (obj->gtt_space == NULL) { > printk(KERN_ERR "object found on GTT list with no space reserved\n"); > err++; > @@ -3038,8 +3039,8 @@ static 
void i915_gem_verify_gtt(struct drm_device *dev) > > if (obj->cache_level != obj->gtt_space->color) { > printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", > - obj->gtt_space->start, > - obj->gtt_space->start + obj->gtt_space->size, > + obj_offset, > + obj_offset + i915_gem_obj_size(obj), > obj->cache_level, > obj->gtt_space->color); > err++; > @@ -3050,8 +3051,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) > obj->gtt_space, > obj->cache_level)) { > printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", > - obj->gtt_space->start, > - obj->gtt_space->start + obj->gtt_space->size, > + obj_offset, > + obj_offset + i915_gem_obj_size(obj), > obj->cache_level); > err++; > continue; > @@ -3267,7 +3268,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) > int ret; > > /* Not valid to be called on unbound objects. */ > - if (obj->gtt_space == NULL) > + if (!i915_gem_obj_bound(obj)) > return -EINVAL; > > if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) > @@ -3332,7 +3333,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, > return ret; > } > > - if (obj->gtt_space) { > + if (i915_gem_obj_bound(obj)) { > ret = i915_gem_object_finish_gpu(obj); > if (ret) > return ret; > @@ -3355,7 +3356,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, > i915_ppgtt_bind_object(dev_priv->gtt.aliasing_ppgtt, > obj, cache_level); > > - obj->gtt_space->color = cache_level; > + i915_gem_obj_set_color(obj, cache_level); > } > > if (cache_level == I915_CACHE_NONE) { > @@ -3636,14 +3637,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, > if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) > return -EBUSY; > > - if (obj->gtt_space != NULL) { > - if ((alignment && obj->gtt_space->start & (alignment - 1)) || > + if (i915_gem_obj_bound(obj)) { > + if ((alignment && i915_gem_obj_offset(obj) & (alignment - 1)) || > 
(map_and_fenceable && !obj->map_and_fenceable)) { > WARN(obj->pin_count, > "bo is already pinned with incorrect alignment:" > " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," > " obj->map_and_fenceable=%d\n", > - obj->gtt_space->start, alignment, > + i915_gem_obj_offset(obj), alignment, > map_and_fenceable, > obj->map_and_fenceable); > ret = i915_gem_object_unbind(obj); > @@ -3652,7 +3653,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, > } > } > > - if (obj->gtt_space == NULL) { > + if (!i915_gem_obj_bound(obj)) { > struct drm_i915_private *dev_priv = obj->base.dev->dev_private; > > ret = i915_gem_object_bind_to_gtt(obj, alignment, > @@ -3678,7 +3679,7 @@ void > i915_gem_object_unpin(struct drm_i915_gem_object *obj) > { > BUG_ON(obj->pin_count == 0); > - BUG_ON(obj->gtt_space == NULL); > + BUG_ON(!i915_gem_obj_bound(obj)); > > if (--obj->pin_count == 0) > obj->pin_mappable = false; > @@ -3728,7 +3729,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, > * as the X server doesn't manage domains yet > */ > i915_gem_object_flush_cpu_write_domain(obj); > - args->offset = obj->gtt_space->start; > + args->offset = i915_gem_obj_offset(obj); > out: > drm_gem_object_unreference(&obj->base); > unlock: > diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c > index 1e838f4..75b4e27 100644 > --- a/drivers/gpu/drm/i915/i915_gem_context.c > +++ b/drivers/gpu/drm/i915/i915_gem_context.c > @@ -395,7 +395,7 @@ mi_set_context(struct intel_ring_buffer *ring, > > intel_ring_emit(ring, MI_NOOP); > intel_ring_emit(ring, MI_SET_CONTEXT); > - intel_ring_emit(ring, new_context->obj->gtt_space->start | > + intel_ring_emit(ring, i915_gem_obj_offset(new_context->obj) | > MI_MM_SPACE_GTT | > MI_SAVE_EXT_STATE_EN | > MI_RESTORE_EXT_STATE_EN | > diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c > index 67246a6..837372d 100644 > --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c > 
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c > @@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, > return -ENOENT; > > target_i915_obj = to_intel_bo(target_obj); > - target_offset = target_i915_obj->gtt_space->start; > + target_offset = i915_gem_obj_offset(target_i915_obj); > > /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and > * pipe_control writes because the gpu doesn't properly redirect them > @@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, > return ret; > > /* Map the page containing the relocation we're going to perform. */ > - reloc->offset += obj->gtt_space->start; > + reloc->offset += i915_gem_obj_offset(obj); > reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, > reloc->offset & PAGE_MASK); > reloc_entry = (uint32_t __iomem *) > @@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, > obj->has_aliasing_ppgtt_mapping = 1; > } > > - if (entry->offset != obj->gtt_space->start) { > - entry->offset = obj->gtt_space->start; > + if (entry->offset != i915_gem_obj_offset(obj)) { > + entry->offset = i915_gem_obj_offset(obj); > *need_reloc = true; > } > > @@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) > { > struct drm_i915_gem_exec_object2 *entry; > > - if (!obj->gtt_space) > + if (!i915_gem_obj_bound(obj)) > return; > > entry = obj->exec_entry; > @@ -528,11 +528,13 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, > /* Unbind any ill-fitting objects or pin. 
*/ > list_for_each_entry(obj, objects, exec_list) { > struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; > + unsigned long obj_offset; > bool need_fence, need_mappable; > > - if (!obj->gtt_space) > + if (!i915_gem_obj_bound(obj)) > continue; > > + obj_offset = i915_gem_obj_offset(obj); > need_fence = > has_fenced_gpu_access && > entry->flags & EXEC_OBJECT_NEEDS_FENCE && > @@ -540,7 +542,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, > need_mappable = need_fence || need_reloc_mappable(obj); > > if ((entry->alignment && > - obj->gtt_space->start & (entry->alignment - 1)) || > + obj_offset & (entry->alignment - 1)) || > (need_mappable && !obj->map_and_fenceable)) > ret = i915_gem_object_unbind(obj); > else > @@ -551,7 +553,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, > > /* Bind fresh objects */ > list_for_each_entry(obj, objects, exec_list) { > - if (obj->gtt_space) > + if (i915_gem_obj_bound(obj)) > continue; > > ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); > @@ -1072,7 +1074,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, > goto err; > } > > - exec_start = batch_obj->gtt_space->start + args->batch_start_offset; > + exec_start = i915_gem_obj_offset(batch_obj) + args->batch_start_offset; > exec_len = args->batch_len; > if (cliprects) { > for (i = 0; i < args->num_cliprects; i++) { > diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c > index 17e334f..566ab76 100644 > --- a/drivers/gpu/drm/i915/i915_gem_gtt.c > +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c > @@ -390,7 +390,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, > enum i915_cache_level cache_level) > { > ppgtt->base.insert_entries(&ppgtt->base, obj->pages, > - obj->gtt_space->start >> PAGE_SHIFT, > + i915_gem_obj_offset(obj) >> PAGE_SHIFT, > cache_level); > } > > @@ -398,7 +398,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, > struct drm_i915_gem_object *obj) > { > 
ppgtt->base.clear_range(&ppgtt->base, > - obj->gtt_space->start >> PAGE_SHIFT, > + i915_gem_obj_offset(obj) >> PAGE_SHIFT, > obj->base.size >> PAGE_SHIFT); > } > > @@ -570,9 +570,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, > { > struct drm_device *dev = obj->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > + unsigned long obj_offset = i915_gem_obj_offset(obj); > > i915_gtt_vm->insert_entries(&dev_priv->gtt.base, obj->pages, > - obj->gtt_space->start >> PAGE_SHIFT, > + obj_offset >> PAGE_SHIFT, > cache_level); > > obj->has_global_gtt_mapping = 1; > @@ -582,9 +583,10 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) > { > struct drm_device *dev = obj->base.dev; > struct drm_i915_private *dev_priv = dev->dev_private; > + unsigned long obj_offset = i915_gem_obj_offset(obj); > > i915_gtt_vm->clear_range(&dev_priv->gtt.base, > - obj->gtt_space->start >> PAGE_SHIFT, > + obj_offset >> PAGE_SHIFT, > obj->base.size >> PAGE_SHIFT); > > obj->has_global_gtt_mapping = 0; > @@ -682,7 +684,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, > uintptr_t gtt_offset = (uintptr_t)obj->gtt_space; > int ret; > DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", > - obj->gtt_space->start, obj->base.size); > + i915_gem_obj_offset(obj), obj->base.size); > > BUG_ON((gtt_offset & I915_GTT_RESERVED) == 0); > gtt_offset = gtt_offset & ~I915_GTT_RESERVED; > diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c > index 7aab12a..2478114 100644 > --- a/drivers/gpu/drm/i915/i915_gem_tiling.c > +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c > @@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) > return true; > > if (INTEL_INFO(obj->base.dev)->gen == 3) { > - if (obj->gtt_space->start & ~I915_FENCE_START_MASK) > + if (i915_gem_obj_offset(obj) & ~I915_FENCE_START_MASK) > return false; > } else { > - if (obj->gtt_space->start & 
~I830_FENCE_START_MASK) > + if (i915_gem_obj_offset(obj) & ~I830_FENCE_START_MASK) > return false; > } > > size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); > - if (obj->gtt_space->size != size) > + if (i915_gem_obj_size(obj) != size) > return false; > > - if (obj->gtt_space->start & (size - 1)) > + if (i915_gem_obj_offset(obj) & (size - 1)) > return false; > > return true; > @@ -358,8 +358,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, > * whilst executing a fenced command for an untiled object. > */ > > - obj->map_and_fenceable = obj->gtt_space == NULL || > - (obj->gtt_space->start + > + obj->map_and_fenceable = !i915_gem_obj_bound(obj) || > + (i915_gem_obj_offset(obj) + > obj->base.size <= dev_priv->gtt.mappable_end && > i915_gem_object_fence_ok(obj, args->tiling_mode)); > > @@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, > i915_gem_get_gtt_alignment(dev, obj->base.size, > args->tiling_mode, > false); > - if (obj->gtt_space->start & (unfenced_alignment - 1)) > + if (i915_gem_obj_offset(obj) & (unfenced_alignment - 1)) > ret = i915_gem_object_unbind(obj); > } > > diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c > index 2c4fe36..c0be641 100644 > --- a/drivers/gpu/drm/i915/i915_irq.c > +++ b/drivers/gpu/drm/i915/i915_irq.c > @@ -1512,7 +1512,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, > if (dst == NULL) > return NULL; > > - reloc_offset = src->gtt_space->start; > + reloc_offset = i915_gem_obj_offset(src); > for (i = 0; i < num_pages; i++) { > unsigned long flags; > void *d; > @@ -1564,7 +1564,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, > reloc_offset += PAGE_SIZE; > } > dst->page_count = num_pages; > - dst->gtt_offset = src->gtt_space->start; > + dst->gtt_offset = i915_gem_obj_offset(src); > > return dst; > > @@ -1618,7 +1618,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, > err->name = obj->base.name; 
> err->rseqno = obj->last_read_seqno; > err->wseqno = obj->last_write_seqno; > - err->gtt_offset = obj->gtt_space->start; > + err->gtt_offset = i915_gem_obj_offset(obj); > err->read_domains = obj->base.read_domains; > err->write_domain = obj->base.write_domain; > err->fence_reg = obj->fence_reg; > @@ -1716,8 +1716,8 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, > return NULL; > > obj = ring->private; > - if (acthd >= obj->gtt_space->start && > - acthd < obj->gtt_space->start + obj->base.size) > + if (acthd >= i915_gem_obj_offset(obj) && > + acthd < i915_gem_obj_offset(obj) + obj->base.size) > return i915_error_object_create(dev_priv, obj); > } > > @@ -1798,7 +1798,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring, > return; > > list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { > - if ((error->ccid & PAGE_MASK) == obj->gtt_space->start) { > + if ((error->ccid & PAGE_MASK) == i915_gem_obj_offset(obj)) { > ering->ctx = i915_error_object_create_sized(dev_priv, > obj, 1); > } > @@ -2152,10 +2152,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in > if (INTEL_INFO(dev)->gen >= 4) { > int dspsurf = DSPSURF(intel_crtc->plane); > stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == > - obj->gtt_space->start; > + i915_gem_obj_offset(obj); > } else { > int dspaddr = DSPADDR(intel_crtc->plane); > - stall_detected = I915_READ(dspaddr) == (obj->gtt_space->start + > + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_offset(obj) + > crtc->y * crtc->fb->pitches[0] + > crtc->x * crtc->fb->bits_per_pixel/8); > } > diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h > index 3db4a68..e4dccb3 100644 > --- a/drivers/gpu/drm/i915/i915_trace.h > +++ b/drivers/gpu/drm/i915/i915_trace.h > @@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind, > > TP_fast_assign( > __entry->obj = obj; > - __entry->offset = obj->gtt_space->start; > - __entry->size = 
obj->gtt_space->size; > + __entry->offset = i915_gem_obj_offset(obj); > + __entry->size = i915_gem_obj_size(obj); > __entry->mappable = mappable; > ), > > @@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind, > > TP_fast_assign( > __entry->obj = obj; > - __entry->offset = obj->gtt_space->start; > - __entry->size = obj->gtt_space->size; > + __entry->offset = i915_gem_obj_offset(obj); > + __entry->size = i915_gem_obj_size(obj); > ), > > TP_printk("obj=%p, offset=%08x size=%x", > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c > index a269d7a..633bfbf 100644 > --- a/drivers/gpu/drm/i915/intel_display.c > +++ b/drivers/gpu/drm/i915/intel_display.c > @@ -1943,18 +1943,18 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, > } > > DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", > - obj->gtt_space->start, linear_offset, x, y, > + i915_gem_obj_offset(obj), linear_offset, x, y, > fb->pitches[0]); > I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); > if (INTEL_INFO(dev)->gen >= 4) { > I915_MODIFY_DISPBASE(DSPSURF(plane), > - obj->gtt_space->start + > + i915_gem_obj_offset(obj) + > intel_crtc->dspaddr_offset); > I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); > I915_WRITE(DSPLINOFF(plane), linear_offset); > } else > I915_WRITE(DSPADDR(plane), > - obj->gtt_space->start + linear_offset); > + i915_gem_obj_offset(obj) + linear_offset); > POSTING_READ(reg); > > return 0; > @@ -2035,11 +2035,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc, > linear_offset -= intel_crtc->dspaddr_offset; > > DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", > - obj->gtt_space->start, linear_offset, x, y, > + i915_gem_obj_offset(obj), linear_offset, x, y, > fb->pitches[0]); > I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); > I915_MODIFY_DISPBASE(DSPSURF(plane), > - obj->gtt_space->start+intel_crtc->dspaddr_offset); > + i915_gem_obj_offset(obj)+intel_crtc->dspaddr_offset); > if (IS_HASWELL(dev)) { > 
I915_WRITE(DSPOFFSET(plane), (y << 16) | x); > } else { > @@ -6558,7 +6558,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, > goto fail_unpin; > } > > - addr = obj->gtt_space->start; > + addr = i915_gem_obj_offset(obj); > } else { > int align = IS_I830(dev) ? 16 * 1024 : 256; > ret = i915_gem_attach_phys_object(dev, obj, > @@ -7274,7 +7274,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, > MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); > intel_ring_emit(ring, fb->pitches[0]); > intel_ring_emit(ring, > - obj->gtt_space->start + intel_crtc->dspaddr_offset); > + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); > intel_ring_emit(ring, 0); /* aux display base address, unused */ > > intel_mark_page_flip_active(intel_crtc); > @@ -7316,7 +7316,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, > MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); > intel_ring_emit(ring, fb->pitches[0]); > intel_ring_emit(ring, > - obj->gtt_space->start + intel_crtc->dspaddr_offset); > + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); > intel_ring_emit(ring, MI_NOOP); > > intel_mark_page_flip_active(intel_crtc); > @@ -7356,7 +7356,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, > MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); > intel_ring_emit(ring, fb->pitches[0]); > intel_ring_emit(ring, > - (obj->gtt_space->start + intel_crtc->dspaddr_offset) | > + (i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset) | > obj->tiling_mode); > > /* XXX Enabling the panel-fitter across page-flip is so far > @@ -7400,7 +7400,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, > MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); > intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); > intel_ring_emit(ring, > - obj->gtt_space->start + intel_crtc->dspaddr_offset); > + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); > > /* Contrary to the suggestions in the documentation, > * "Enable Panel Fitter" does not seem to be required when page > @@ 
-7466,7 +7466,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, > intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); > intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); > intel_ring_emit(ring, > - obj->gtt_space->start + intel_crtc->dspaddr_offset); > + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); > intel_ring_emit(ring, (MI_NOOP)); > > intel_mark_page_flip_active(intel_crtc); > diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c > index 242a793..8315a5e 100644 > --- a/drivers/gpu/drm/i915/intel_fb.c > +++ b/drivers/gpu/drm/i915/intel_fb.c > @@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper, > info->apertures->ranges[0].base = dev->mode_config.fb_base; > info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; > > - info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_space->start; > + info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_offset(obj); > info->fix.smem_len = size; > > info->screen_base = > - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_space->start, > + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_offset(obj), > size); > if (!info->screen_base) { > ret = -ENOSPC; > @@ -168,7 +168,7 @@ static int intelfb_create(struct drm_fb_helper *helper, > > DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", > fb->width, fb->height, > - obj->gtt_space->start, obj); > + i915_gem_obj_offset(obj), obj); > > > mutex_unlock(&dev->struct_mutex); > diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c > index 93f2671..41654b1 100644 > --- a/drivers/gpu/drm/i915/intel_overlay.c > +++ b/drivers/gpu/drm/i915/intel_overlay.c > @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) > regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; > else > regs = io_mapping_map_wc(dev_priv->gtt.mappable, > - overlay->reg_bo->gtt_space->start); > + 
i915_gem_obj_offset(overlay->reg_bo)); > > return regs; > } > @@ -740,7 +740,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, > swidth = params->src_w; > swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); > sheight = params->src_h; > - iowrite32(new_bo->gtt_space->start + params->offset_Y, ®s->OBUF_0Y); > + iowrite32(i915_gem_obj_offset(new_bo) + params->offset_Y, > + ®s->OBUF_0Y); > ostride = params->stride_Y; > > if (params->format & I915_OVERLAY_YUV_PLANAR) { > @@ -754,9 +755,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, > params->src_w/uv_hscale); > swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; > sheight |= (params->src_h/uv_vscale) << 16; > - iowrite32(new_bo->gtt_space->start + params->offset_U, > + iowrite32(i915_gem_obj_offset(new_bo) + params->offset_U, > ®s->OBUF_0U); > - iowrite32(new_bo->gtt_space->start + params->offset_V, > + iowrite32(i915_gem_obj_offset(new_bo) + params->offset_V, > ®s->OBUF_0V); > ostride |= params->stride_UV << 16; > } > @@ -1357,7 +1358,7 @@ void intel_setup_overlay(struct drm_device *dev) > DRM_ERROR("failed to pin overlay register bo\n"); > goto out_free_bo; > } > - overlay->flip_addr = reg_bo->gtt_space->start; > + overlay->flip_addr = i915_gem_obj_offset(reg_bo); > > ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); > if (ret) { > @@ -1436,7 +1437,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) > overlay->reg_bo->phys_obj->handle->vaddr; > > return io_mapping_map_atomic_wc(dev_priv->gtt.mappable, > - overlay->reg_bo->gtt_space->start); > + i915_gem_obj_offset(overlay->reg_bo)); > } > > static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, > @@ -1467,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) > if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) > error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; > else > - error->base = overlay->reg_bo->gtt_space->start; > + error->base = 
i915_gem_obj_offset(overlay->reg_bo); > > regs = intel_overlay_map_regs_atomic(overlay); > if (!regs) > diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c > index 73c0ee1..504d96b 100644 > --- a/drivers/gpu/drm/i915/intel_pm.c > +++ b/drivers/gpu/drm/i915/intel_pm.c > @@ -217,7 +217,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) > (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | > (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); > I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); > - I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_space->start | ILK_FBC_RT_VALID); > + I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_offset(obj) | ILK_FBC_RT_VALID); > /* enable it... */ > I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); > > @@ -3685,7 +3685,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) > > intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); > intel_ring_emit(ring, MI_SET_CONTEXT); > - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_space->start | > + intel_ring_emit(ring, i915_gem_obj_offset(dev_priv->ips.renderctx) | > MI_MM_SPACE_GTT | > MI_SAVE_EXT_STATE_EN | > MI_RESTORE_EXT_STATE_EN | > @@ -3708,7 +3708,8 @@ static void ironlake_enable_rc6(struct drm_device *dev) > return; > } > > - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_space->start | PWRCTX_EN); > + I915_WRITE(PWRCTXA, i915_gem_obj_offset(dev_priv->ips.pwrctx) | > + PWRCTX_EN); > I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); > } > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c > index c4c80c2..64b579f 100644 > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c > @@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) > * registers with the above sequence (the readback of the HEAD registers > * also enforces ordering), otherwise the hw might lose the new ring > * register values. 
*/ > - I915_WRITE_START(ring, obj->gtt_space->start); > + I915_WRITE_START(ring, i915_gem_obj_offset(obj)); > I915_WRITE_CTL(ring, > ((ring->size - PAGE_SIZE) & RING_NR_PAGES) > | RING_VALID); > > /* If the head is still not zero, the ring is dead */ > if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && > - I915_READ_START(ring) == obj->gtt_space->start && > + I915_READ_START(ring) == i915_gem_obj_offset(obj) && > (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { > DRM_ERROR("%s initialization failed " > "ctl %08x head %08x tail %08x start %08x\n", > @@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring) > if (ret) > goto err_unref; > > - pc->gtt_offset = obj->gtt_space->start; > + pc->gtt_offset = i915_gem_obj_offset(obj); > pc->cpu_page = kmap(sg_page(obj->pages->sgl)); > if (pc->cpu_page == NULL) { > ret = -ENOMEM; > @@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, > intel_ring_advance(ring); > } else { > struct drm_i915_gem_object *obj = ring->private; > - u32 cs_offset = obj->gtt_space->start; > + u32 cs_offset = i915_gem_obj_offset(obj); > > if (len > I830_BATCH_LIMIT) > return -ENOSPC; > @@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring) > goto err_unref; > } > > - ring->status_page.gfx_addr = obj->gtt_space->start; > + ring->status_page.gfx_addr = i915_gem_obj_offset(obj); > ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); > if (ring->status_page.page_addr == NULL) { > ret = -ENOMEM; > @@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, > goto err_unpin; > > ring->virtual_start = > - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_space->start, > + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_offset(obj), > ring->size); > if (ring->virtual_start == NULL) { > DRM_ERROR("Failed to map ringbuffer.\n"); > diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c > index c342571..117a2f8 100644 > --- 
a/drivers/gpu/drm/i915/intel_sprite.c > +++ b/drivers/gpu/drm/i915/intel_sprite.c > @@ -133,7 +133,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, > > I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); > I915_WRITE(SPCNTR(pipe, plane), sprctl); > - I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_space->start + > + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_offset(obj) + > sprsurf_offset); > POSTING_READ(SPSURF(pipe, plane)); > } > @@ -309,7 +309,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, > I915_WRITE(SPRSCALE(pipe), sprscale); > I915_WRITE(SPRCTL(pipe), sprctl); > I915_MODIFY_DISPBASE(SPRSURF(pipe), > - obj->gtt_space->start + sprsurf_offset); > + i915_gem_obj_offset(obj) + sprsurf_offset); > POSTING_READ(SPRSURF(pipe)); > > /* potentially re-enable LP watermarks */ > @@ -480,7 +480,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, > I915_WRITE(DVSSCALE(pipe), dvsscale); > I915_WRITE(DVSCNTR(pipe), dvscntr); > I915_MODIFY_DISPBASE(DVSSURF(pipe), > - obj->gtt_space->start + dvssurf_offset); > + i915_gem_obj_offset(obj) + dvssurf_offset); > POSTING_READ(DVSSURF(pipe)); > } > > -- > 1.8.3.1 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@lists.freedesktop.org > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
On Sun, Jun 30, 2013 at 03:00:05PM +0200, Daniel Vetter wrote: > On Thu, Jun 27, 2013 at 04:30:31PM -0700, Ben Widawsky wrote: > > This will be handy when we add VMs. It's not strictly, necessary, but it > > will make the code much cleaner. > > > > Signed-off-by: Ben Widawsky <ben@bwidawsk.net> > > You're going to hate, but this is patch ordering fail. Imo this should be > one of the very first patches, at least before you kill obj->gtt_offset. > > To increase your hatred some more, I have bikesheds on the names, too. > > I think the best would be to respin this patch and merge it right away. > It'll cause tons of conflicts. But keeping it as no. 30 in this series > will be even worse, since merging the first 30 patches won't happen > instantly. So much more potential for rebase hell imo. > > The MO for when you stumble over such a giant renaming operation should be > imo to submit the "add inline abstraction functions" patch(es) right away. > That way everyone else who potentially works in the same area also gets a > heads up. > > > > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > > index bc80ce0..56d47bc 100644 > > --- a/drivers/gpu/drm/i915/i915_drv.h > > +++ b/drivers/gpu/drm/i915/i915_drv.h > > @@ -1349,6 +1349,27 @@ struct drm_i915_gem_object { > > > > #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) > > > > +static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o) > > +{ > > + return o->gtt_space->start; > > +} > > To differentiate from the ppgtt offset I'd call this > i915_gem_obj_ggtt_offset. > > > + > > +static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o) > > +{ > > + return o->gtt_space != NULL; > > +} > > Same here, I think we want ggtt inserted. 
> > > + > > +static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o) > > +{ > > + return o->gtt_space->size; > > +} > > This is even more misleading and the real reasons I vote for all the ggtt > bikesheds: ggtt_size != obj->size is very much possible (on gen2/3 only > though). We use that to satisfy alignment/size constraints on tiled > objects. So the i915_gem_obj_ggtt_size rename is mandatory here. > > > + > > +static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o, > > + enum i915_cache_level color) > > +{ > > + o->gtt_space->color = color; > > +} > > Dito for consistency. > > Cheers, Daniel > > All of this is addressed in future patches. As we've discussed, I think I'll have to respin it anyway, so I'll name it as such upfront. To me it felt a little weird to start calling things "ggtt" before I made the separation. [snip]
On Mon, Jul 1, 2013 at 8:32 PM, Ben Widawsky <ben@bwidawsk.net> wrote: > On Sun, Jun 30, 2013 at 03:00:05PM +0200, Daniel Vetter wrote: >> On Thu, Jun 27, 2013 at 04:30:31PM -0700, Ben Widawsky wrote: >> > This will be handy when we add VMs. It's not strictly, necessary, but it >> > will make the code much cleaner. >> > >> > Signed-off-by: Ben Widawsky <ben@bwidawsk.net> >> >> You're going to hate, but this is patch ordering fail. Imo this should be >> one of the very first patches, at least before you kill obj->gtt_offset. >> >> To increase your hatred some more, I have bikesheds on the names, too. >> >> I think the best would be to respin this patch and merge it right away. >> It'll cause tons of conflicts. But keeping it as no. 30 in this series >> will be even worse, since merging the first 30 patches won't happen >> instantly. So much more potential for rebase hell imo. >> >> The MO for when you stumble over such a giant renaming operation should be >> imo to submit the "add inline abstraction functions" patch(es) right away. >> That way everyone else who potentially works in the same area also gets a >> heads up. >> >> >> > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h >> > index bc80ce0..56d47bc 100644 >> > --- a/drivers/gpu/drm/i915/i915_drv.h >> > +++ b/drivers/gpu/drm/i915/i915_drv.h >> > @@ -1349,6 +1349,27 @@ struct drm_i915_gem_object { >> > >> > #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) >> > >> > +static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o) >> > +{ >> > + return o->gtt_space->start; >> > +} >> >> To differentiate from the ppgtt offset I'd call this >> i915_gem_obj_ggtt_offset. >> >> > + >> > +static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o) >> > +{ >> > + return o->gtt_space != NULL; >> > +} >> >> Same here, I think we want ggtt inserted. 
>> >> > + >> > +static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o) >> > +{ >> > + return o->gtt_space->size; >> > +} >> >> This is even more misleading and the real reasons I vote for all the ggtt >> bikesheds: ggtt_size != obj->size is very much possible (on gen2/3 only >> though). We use that to satisfy alignment/size constraints on tiled >> objects. So the i915_gem_obj_ggtt_size rename is mandatory here. >> >> > + >> > +static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o, >> > + enum i915_cache_level color) >> > +{ >> > + o->gtt_space->color = color; >> > +} >> >> Dito for consistency. >> >> Cheers, Daniel >> >> > All of this is addressed in future patches. As we've discussed, I think > I'll have to respin it anyway, so I'll name it as such upfront. To me it > felt a little weird to start calling things "ggtt" before I made the > separation. I think now that we know what the end result should (more or less at least) look like we can aim to make it right the first time we touch a piece of code. That will reduce the churn in the patch series and so make the beast easier to review. Imo foreshadowing (to keep consistent with the "a patch series should tell a story" analogy) is perfectly fine, and in many cases helps in understanding the big picture of a large pile of patches. -Daniel -- Daniel Vetter Software Engineer, Intel Corporation +41 (0) 79 365 57 48 - http://blog.ffwll.ch
On Mon, Jul 1, 2013 at 8:43 PM, Daniel Vetter <daniel@ffwll.ch> wrote: >> All of this is addressed in future patches. As we've discussed, I think >> I'll have to respin it anyway, so I'll name it as such upfront. To me it >> felt a little weird to start calling things "ggtt" before I made the >> separation. > > I think now that we know what the end result should (more or less at > least) look like we can aim to make it right the first time we touch a > piece of code. That will reduce the churn in the patch series and so > make the beast easier to review. > > Imo foreshadowing (to keep consistent with the "a patch series should > tell a story" analogy) is perfectly fine, and in many cases helps in > understanding the big picture of a large pile of patches. I've forgotten to add one thing: If you switch these again later on (layz me didn't check for that) it's imo best to stick with those names (presuming they fit, since the gtt_size vs. obj->size disdinction is a rather important one). Again I think now that we know where to go to it's best to get there with as few intermediate steps as possible. -Daniel -- Daniel Vetter Software Engineer, Intel Corporation +41 (0) 79 365 57 48 - http://blog.ffwll.ch
On Mon, Jul 01, 2013 at 09:08:58PM +0200, Daniel Vetter wrote: > On Mon, Jul 1, 2013 at 8:43 PM, Daniel Vetter <daniel@ffwll.ch> wrote: > >> All of this is addressed in future patches. As we've discussed, I think > >> I'll have to respin it anyway, so I'll name it as such upfront. To me it > >> felt a little weird to start calling things "ggtt" before I made the > >> separation. > > > > I think now that we know what the end result should (more or less at > > least) look like we can aim to make it right the first time we touch a > > piece of code. That will reduce the churn in the patch series and so > > make the beast easier to review. > > > > Imo foreshadowing (to keep consistent with the "a patch series should > > tell a story" analogy) is perfectly fine, and in many cases helps in > > understanding the big picture of a large pile of patches. > > I've forgotten to add one thing: If you switch these again later on > (layz me didn't check for that) it's imo best to stick with those > names (presuming they fit, since the gtt_size vs. obj->size > disdinction is a rather important one). Again I think now that we know > where to go to it's best to get there with as few intermediate steps > as possible. > -Daniel > I don't recall object size being very important actually, so I don't think the distinction is too important, but I'm just arguing for the sake of arguing. With the sg page stuff that Imre did, I think most size calculations unrelated to gtt size are there anyway, and most of our mm (not page allocation) code should only ever care about the gtt. > -- > Daniel Vetter > Software Engineer, Intel Corporation > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
On Mon, Jul 01, 2013 at 03:59:51PM -0700, Ben Widawsky wrote: > On Mon, Jul 01, 2013 at 09:08:58PM +0200, Daniel Vetter wrote: > > On Mon, Jul 1, 2013 at 8:43 PM, Daniel Vetter <daniel@ffwll.ch> wrote: > > >> All of this is addressed in future patches. As we've discussed, I think > > >> I'll have to respin it anyway, so I'll name it as such upfront. To me it > > >> felt a little weird to start calling things "ggtt" before I made the > > >> separation. > > > > > > I think now that we know what the end result should (more or less at > > > least) look like we can aim to make it right the first time we touch a > > > piece of code. That will reduce the churn in the patch series and so > > > make the beast easier to review. > > > > > > Imo foreshadowing (to keep consistent with the "a patch series should > > > tell a story" analogy) is perfectly fine, and in many cases helps in > > > understanding the big picture of a large pile of patches. > > > > I've forgotten to add one thing: If you switch these again later on > > (layz me didn't check for that) it's imo best to stick with those > > names (presuming they fit, since the gtt_size vs. obj->size > > disdinction is a rather important one). Again I think now that we know > > where to go to it's best to get there with as few intermediate steps > > as possible. > > -Daniel > > > > I don't recall object size being very important actually, so I don't > think the distinction is too important, but I'm just arguing for the > sake of arguing. With the sg page stuff that Imre did, I think most size > calculations unrelated to gtt size are there anyway, and most of our mm > (not page allocation) code should only ever care about the gtt. The disdinction is only important on gen2/3, which is why you don't recall it being important ;-) I think you have two options: - Trust me that it's indeed important. - Read up on gen2/3 fencing code and make up your own mind. Cheers, Daniel
On Tue, Jul 02, 2013 at 09:28:56AM +0200, Daniel Vetter wrote: > On Mon, Jul 01, 2013 at 03:59:51PM -0700, Ben Widawsky wrote: > > On Mon, Jul 01, 2013 at 09:08:58PM +0200, Daniel Vetter wrote: > > > On Mon, Jul 1, 2013 at 8:43 PM, Daniel Vetter <daniel@ffwll.ch> wrote: > > > >> All of this is addressed in future patches. As we've discussed, I think > > > >> I'll have to respin it anyway, so I'll name it as such upfront. To me it > > > >> felt a little weird to start calling things "ggtt" before I made the > > > >> separation. > > > > > > > > I think now that we know what the end result should (more or less at > > > > least) look like we can aim to make it right the first time we touch a > > > > piece of code. That will reduce the churn in the patch series and so > > > > make the beast easier to review. > > > > > > > > Imo foreshadowing (to keep consistent with the "a patch series should > > > > tell a story" analogy) is perfectly fine, and in many cases helps in > > > > understanding the big picture of a large pile of patches. > > > > > > I've forgotten to add one thing: If you switch these again later on > > > (layz me didn't check for that) it's imo best to stick with those > > > names (presuming they fit, since the gtt_size vs. obj->size > > > disdinction is a rather important one). Again I think now that we know > > > where to go to it's best to get there with as few intermediate steps > > > as possible. > > > -Daniel > > > > > > > I don't recall object size being very important actually, so I don't > > think the distinction is too important, but I'm just arguing for the > > sake of arguing. With the sg page stuff that Imre did, I think most size > > calculations unrelated to gtt size are there anyway, and most of our mm > > (not page allocation) code should only ever care about the gtt. 
> > The disdinction is only important on gen2/3, which is why you don't recall > it being important ;-) > > I think you have two options: > - Trust me that it's indeed important. > - Read up on gen2/3 fencing code and make up your own mind. > > Cheers, Daniel I am not saying the distinction doesn't exist. I was saying it's not prevalent in too many places. See the second part of my statement, I believe it holds. But please be clear what you're asking for, do you want 2 getters for vma size vs. obj size? > -- > Daniel Vetter > Software Engineer, Intel Corporation > +41 (0) 79 365 57 48 - http://blog.ffwll.ch
On Tue, Jul 2, 2013 at 6:51 PM, Ben Widawsky <ben@bwidawsk.net> wrote: > On Tue, Jul 02, 2013 at 09:28:56AM +0200, Daniel Vetter wrote: >> On Mon, Jul 01, 2013 at 03:59:51PM -0700, Ben Widawsky wrote: >> > On Mon, Jul 01, 2013 at 09:08:58PM +0200, Daniel Vetter wrote: >> > > On Mon, Jul 1, 2013 at 8:43 PM, Daniel Vetter <daniel@ffwll.ch> wrote: >> > > >> All of this is addressed in future patches. As we've discussed, I think >> > > >> I'll have to respin it anyway, so I'll name it as such upfront. To me it >> > > >> felt a little weird to start calling things "ggtt" before I made the >> > > >> separation. >> > > > >> > > > I think now that we know what the end result should (more or less at >> > > > least) look like we can aim to make it right the first time we touch a >> > > > piece of code. That will reduce the churn in the patch series and so >> > > > make the beast easier to review. >> > > > >> > > > Imo foreshadowing (to keep consistent with the "a patch series should >> > > > tell a story" analogy) is perfectly fine, and in many cases helps in >> > > > understanding the big picture of a large pile of patches. >> > > >> > > I've forgotten to add one thing: If you switch these again later on >> > > (layz me didn't check for that) it's imo best to stick with those >> > > names (presuming they fit, since the gtt_size vs. obj->size >> > > disdinction is a rather important one). Again I think now that we know >> > > where to go to it's best to get there with as few intermediate steps >> > > as possible. >> > > -Daniel >> > > >> > >> > I don't recall object size being very important actually, so I don't >> > think the distinction is too important, but I'm just arguing for the >> > sake of arguing. With the sg page stuff that Imre did, I think most size >> > calculations unrelated to gtt size are there anyway, and most of our mm >> > (not page allocation) code should only ever care about the gtt. 
>> >> The disdinction is only important on gen2/3, which is why you don't recall >> it being important ;-) >> >> I think you have two options: >> - Trust me that it's indeed important. >> - Read up on gen2/3 fencing code and make up your own mind. >> >> Cheers, Daniel > I am not saying the distinction doesn't exist. I was saying it's not > prevalent in too many places. See the second part of my statement, I > believe it holds. > > But please be clear what you're asking for, do you want 2 getters for > vma size vs. obj size? I guess we need a few different things: - vma->node.size: Most functions can probably just access that one directly (e.g. for writing the ptes), maybe we need something for transition. I guess we could call it obj_gtt_size since while transition there's only the one gtt address space. - The obj size in the gtt. Differs from obj->size on gen2/3 for tiled objects (and has pretty funny rules at that how it changes). Mostly important for debugfs, for the gtt stats. Any access helper which just calls this obj_size is imo highly misleading, hence why I've voted for for obj_gtt_size. - I don't think we need an access helper for obj->size itself, since that is invariant and I don't think we'll move it around either. Cheers, Daniel -- Daniel Vetter Software Engineer, Intel Corporation +41 (0) 79 365 57 48 - http://blog.ffwll.ch
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3d3e770..87f813e 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -122,10 +122,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (pinned x %d)", obj->pin_count); if (obj->fence_reg != I915_FENCE_REG_NONE) seq_printf(m, " (fence: %d)", obj->fence_reg); - if (obj->gtt_space != NULL) - seq_printf(m, " (gtt offset: %08lx, size: %08x)", - obj->gtt_space->start, - (unsigned int)obj->gtt_space->size); + if (i915_gem_obj_bound(obj)) + seq_printf(m, " (gtt offset: %08lx, size: %08lx)", + i915_gem_obj_offset(obj), + i915_gem_obj_size(obj)); if (obj->stolen) seq_printf(m, " (stolen: %08lx)", obj->stolen->start); if (obj->pin_mappable || obj->fault_mappable) { @@ -176,7 +176,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) describe_obj(m, obj); seq_printf(m, "\n"); total_obj_size += obj->base.size; - total_gtt_size += obj->gtt_space->size; + total_gtt_size += i915_gem_obj_size(obj); count++; } mutex_unlock(&dev->struct_mutex); @@ -188,10 +188,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) #define count_objects(list, member) do { \ list_for_each_entry(obj, list, member) { \ - size += obj->gtt_space->size; \ + size += i915_gem_obj_size(obj); \ ++count; \ if (obj->map_and_fenceable) { \ - mappable_size += obj->gtt_space->size; \ + mappable_size += i915_gem_obj_size(obj); \ ++mappable_count; \ } \ } \ @@ -268,11 +268,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data) size = count = mappable_size = mappable_count = 0; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { if (obj->fault_mappable) { - size += obj->gtt_space->size; + size += i915_gem_obj_size(obj); ++count; } if (obj->pin_mappable) { - mappable_size += obj->gtt_space->size; + mappable_size += i915_gem_obj_size(obj); ++mappable_count; } if (obj->madv == 
I915_MADV_DONTNEED) { @@ -334,7 +334,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data) describe_obj(m, obj); seq_printf(m, "\n"); total_obj_size += obj->base.size; - total_gtt_size += obj->gtt_space->size; + total_gtt_size += i915_gem_obj_size(obj); count++; } @@ -380,12 +380,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) if (work->old_fb_obj) { struct drm_i915_gem_object *obj = work->old_fb_obj; if (obj) - seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", obj->gtt_space->start); + seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n", + i915_gem_obj_offset(obj)); } if (work->pending_flip_obj) { struct drm_i915_gem_object *obj = work->pending_flip_obj; if (obj) - seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", obj->gtt_space->start); + seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n", + i915_gem_obj_offset(obj)); } } spin_unlock_irqrestore(&dev->event_lock, flags); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index bc80ce0..56d47bc 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1349,6 +1349,27 @@ struct drm_i915_gem_object { #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) +static inline unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o) +{ + return o->gtt_space->start; +} + +static inline bool i915_gem_obj_bound(struct drm_i915_gem_object *o) +{ + return o->gtt_space != NULL; +} + +static inline unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o) +{ + return o->gtt_space->size; +} + +static inline void i915_gem_obj_set_color(struct drm_i915_gem_object *o, + enum i915_cache_level color) +{ + o->gtt_space->color = color; +} + /** * Request queue structure. 
* diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d747a1f..dd2228d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -135,7 +135,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev) static inline bool i915_gem_object_is_inactive(struct drm_i915_gem_object *obj) { - return obj->gtt_space && !obj->active; + return i915_gem_obj_bound(obj) && !obj->active; } int @@ -178,7 +178,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) if (obj->pin_count) - pinned += obj->gtt_space->size; + pinned += i915_gem_obj_size(obj); mutex_unlock(&dev->struct_mutex); args->aper_size = i915_gtt_vm->total; @@ -422,7 +422,7 @@ i915_gem_shmem_pread(struct drm_device *dev, * anyway again before the next pread happens. */ if (obj->cache_level == I915_CACHE_NONE) needs_clflush = 1; - if (obj->gtt_space) { + if (i915_gem_obj_bound(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, false); if (ret) return ret; @@ -609,7 +609,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, user_data = to_user_ptr(args->data_ptr); remain = args->size; - offset = obj->gtt_space->start + args->offset; + offset = i915_gem_obj_offset(obj) + args->offset; while (remain > 0) { /* Operation in this page @@ -739,7 +739,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev, * right away and we therefore have to clflush anyway. 
*/ if (obj->cache_level == I915_CACHE_NONE) needs_clflush_after = 1; - if (obj->gtt_space) { + if (i915_gem_obj_bound(obj)) { ret = i915_gem_object_set_to_gtt_domain(obj, true); if (ret) return ret; @@ -1361,7 +1361,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) obj->fault_mappable = true; - pfn += (obj->gtt_space->start >> PAGE_SHIFT) + page_offset; + pfn += (i915_gem_obj_offset(obj) >> PAGE_SHIFT) + page_offset; /* Finally, remap it using the new GTT offset */ ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); @@ -1667,7 +1667,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) if (obj->pages == NULL) return 0; - BUG_ON(obj->gtt_space); + BUG_ON(i915_gem_obj_bound(obj)); if (obj->pages_pin_count) return -EBUSY; @@ -2587,7 +2587,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) drm_i915_private_t *dev_priv = obj->base.dev->dev_private; int ret; - if (obj->gtt_space == NULL) + if (!i915_gem_obj_bound(obj)) return 0; if (obj->pin_count) @@ -2669,11 +2669,11 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg, } if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_size(obj); - val = (uint64_t)((obj->gtt_space->start + size - 4096) & + val = (uint64_t)((i915_gem_obj_offset(obj) + size - 4096) & 0xfffff000) << 32; - val |= obj->gtt_space->start & 0xfffff000; + val |= i915_gem_obj_offset(obj) & 0xfffff000; val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I965_FENCE_TILING_Y_SHIFT; @@ -2693,15 +2693,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, u32 val; if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_size(obj); int pitch_val; int tile_width; - WARN((obj->gtt_space->start & ~I915_FENCE_START_MASK) || + WARN((i915_gem_obj_offset(obj) & ~I915_FENCE_START_MASK) || (size & -size) != size || - (obj->gtt_space->start & (size - 1)), + (i915_gem_obj_offset(obj) & 
(size - 1)), "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n", - obj->gtt_space->start, obj->map_and_fenceable, size); + i915_gem_obj_offset(obj), obj->map_and_fenceable, size); if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) tile_width = 128; @@ -2712,7 +2712,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg, pitch_val = obj->stride / tile_width; pitch_val = ffs(pitch_val) - 1; - val = obj->gtt_space->start; + val = i915_gem_obj_offset(obj); if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= I915_FENCE_SIZE_BITS(size); @@ -2737,19 +2737,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg, uint32_t val; if (obj) { - u32 size = obj->gtt_space->size; + u32 size = i915_gem_obj_size(obj); uint32_t pitch_val; - WARN((obj->gtt_space->start & ~I830_FENCE_START_MASK) || + WARN((i915_gem_obj_offset(obj) & ~I830_FENCE_START_MASK) || (size & -size) != size || - (obj->gtt_space->start & (size - 1)), + (i915_gem_obj_offset(obj) & (size - 1)), "object 0x%08lx not 512K or pot-size 0x%08x aligned\n", - obj->gtt_space->start, size); + i915_gem_obj_offset(obj), size); pitch_val = obj->stride / 128; pitch_val = ffs(pitch_val) - 1; - val = obj->gtt_space->start; + val = i915_gem_obj_offset(obj); if (obj->tiling_mode == I915_TILING_Y) val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= I830_FENCE_SIZE_BITS(size); @@ -3030,6 +3030,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev) int err = 0; list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) { + unsigned long obj_offset = i915_gem_obj_offset(obj); if (obj->gtt_space == NULL) { printk(KERN_ERR "object found on GTT list with no space reserved\n"); err++; @@ -3038,8 +3039,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) if (obj->cache_level != obj->gtt_space->color) { printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n", - obj->gtt_space->start, - 
obj->gtt_space->start + obj->gtt_space->size, + obj_offset, + obj_offset + i915_gem_obj_size(obj), obj->cache_level, obj->gtt_space->color); err++; @@ -3050,8 +3051,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) obj->gtt_space, obj->cache_level)) { printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n", - obj->gtt_space->start, - obj->gtt_space->start + obj->gtt_space->size, + obj_offset, + obj_offset + i915_gem_obj_size(obj), obj->cache_level); err++; continue; @@ -3267,7 +3268,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) int ret; /* Not valid to be called on unbound objects. */ - if (obj->gtt_space == NULL) + if (!i915_gem_obj_bound(obj)) return -EINVAL; if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) @@ -3332,7 +3333,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, return ret; } - if (obj->gtt_space) { + if (i915_gem_obj_bound(obj)) { ret = i915_gem_object_finish_gpu(obj); if (ret) return ret; @@ -3355,7 +3356,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, i915_ppgtt_bind_object(dev_priv->gtt.aliasing_ppgtt, obj, cache_level); - obj->gtt_space->color = cache_level; + i915_gem_obj_set_color(obj, cache_level); } if (cache_level == I915_CACHE_NONE) { @@ -3636,14 +3637,14 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; - if (obj->gtt_space != NULL) { - if ((alignment && obj->gtt_space->start & (alignment - 1)) || + if (i915_gem_obj_bound(obj)) { + if ((alignment && i915_gem_obj_offset(obj) & (alignment - 1)) || (map_and_fenceable && !obj->map_and_fenceable)) { WARN(obj->pin_count, "bo is already pinned with incorrect alignment:" " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", - obj->gtt_space->start, alignment, + i915_gem_obj_offset(obj), alignment, map_and_fenceable, obj->map_and_fenceable); ret = 
i915_gem_object_unbind(obj); @@ -3652,7 +3653,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, } } - if (obj->gtt_space == NULL) { + if (!i915_gem_obj_bound(obj)) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; ret = i915_gem_object_bind_to_gtt(obj, alignment, @@ -3678,7 +3679,7 @@ void i915_gem_object_unpin(struct drm_i915_gem_object *obj) { BUG_ON(obj->pin_count == 0); - BUG_ON(obj->gtt_space == NULL); + BUG_ON(!i915_gem_obj_bound(obj)); if (--obj->pin_count == 0) obj->pin_mappable = false; @@ -3728,7 +3729,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, * as the X server doesn't manage domains yet */ i915_gem_object_flush_cpu_write_domain(obj); - args->offset = obj->gtt_space->start; + args->offset = i915_gem_obj_offset(obj); out: drm_gem_object_unreference(&obj->base); unlock: diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 1e838f4..75b4e27 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -395,7 +395,7 @@ mi_set_context(struct intel_ring_buffer *ring, intel_ring_emit(ring, MI_NOOP); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, new_context->obj->gtt_space->start | + intel_ring_emit(ring, i915_gem_obj_offset(new_context->obj) | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 67246a6..837372d 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -188,7 +188,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return -ENOENT; target_i915_obj = to_intel_bo(target_obj); - target_offset = target_i915_obj->gtt_space->start; + target_offset = i915_gem_obj_offset(target_i915_obj); /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and * pipe_control writes because the gpu doesn't properly redirect 
them @@ -280,7 +280,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, return ret; /* Map the page containing the relocation we're going to perform. */ - reloc->offset += obj->gtt_space->start; + reloc->offset += i915_gem_obj_offset(obj); reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, reloc->offset & PAGE_MASK); reloc_entry = (uint32_t __iomem *) @@ -436,8 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, obj->has_aliasing_ppgtt_mapping = 1; } - if (entry->offset != obj->gtt_space->start) { - entry->offset = obj->gtt_space->start; + if (entry->offset != i915_gem_obj_offset(obj)) { + entry->offset = i915_gem_obj_offset(obj); *need_reloc = true; } @@ -458,7 +458,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj) { struct drm_i915_gem_exec_object2 *entry; - if (!obj->gtt_space) + if (!i915_gem_obj_bound(obj)) return; entry = obj->exec_entry; @@ -528,11 +528,13 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, /* Unbind any ill-fitting objects or pin. 
*/ list_for_each_entry(obj, objects, exec_list) { struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; + unsigned long obj_offset; bool need_fence, need_mappable; - if (!obj->gtt_space) + if (!i915_gem_obj_bound(obj)) continue; + obj_offset = i915_gem_obj_offset(obj); need_fence = has_fenced_gpu_access && entry->flags & EXEC_OBJECT_NEEDS_FENCE && @@ -540,7 +542,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, need_mappable = need_fence || need_reloc_mappable(obj); if ((entry->alignment && - obj->gtt_space->start & (entry->alignment - 1)) || + obj_offset & (entry->alignment - 1)) || (need_mappable && !obj->map_and_fenceable)) ret = i915_gem_object_unbind(obj); else @@ -551,7 +553,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, /* Bind fresh objects */ list_for_each_entry(obj, objects, exec_list) { - if (obj->gtt_space) + if (i915_gem_obj_bound(obj)) continue; ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); @@ -1072,7 +1074,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - exec_start = batch_obj->gtt_space->start + args->batch_start_offset; + exec_start = i915_gem_obj_offset(batch_obj) + args->batch_start_offset; exec_len = args->batch_len; if (cliprects) { for (i = 0; i < args->num_cliprects; i++) { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 17e334f..566ab76 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -390,7 +390,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, enum i915_cache_level cache_level) { ppgtt->base.insert_entries(&ppgtt->base, obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, + i915_gem_obj_offset(obj) >> PAGE_SHIFT, cache_level); } @@ -398,7 +398,7 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_object *obj) { ppgtt->base.clear_range(&ppgtt->base, - obj->gtt_space->start >> PAGE_SHIFT, + i915_gem_obj_offset(obj) >> 
PAGE_SHIFT, obj->base.size >> PAGE_SHIFT); } @@ -570,9 +570,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long obj_offset = i915_gem_obj_offset(obj); i915_gtt_vm->insert_entries(&dev_priv->gtt.base, obj->pages, - obj->gtt_space->start >> PAGE_SHIFT, + obj_offset >> PAGE_SHIFT, cache_level); obj->has_global_gtt_mapping = 1; @@ -582,9 +583,10 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + unsigned long obj_offset = i915_gem_obj_offset(obj); i915_gtt_vm->clear_range(&dev_priv->gtt.base, - obj->gtt_space->start >> PAGE_SHIFT, + obj_offset >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT); obj->has_global_gtt_mapping = 0; @@ -682,7 +684,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, uintptr_t gtt_offset = (uintptr_t)obj->gtt_space; int ret; DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n", - obj->gtt_space->start, obj->base.size); + i915_gem_obj_offset(obj), obj->base.size); BUG_ON((gtt_offset & I915_GTT_RESERVED) == 0); gtt_offset = gtt_offset & ~I915_GTT_RESERVED; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 7aab12a..2478114 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode) return true; if (INTEL_INFO(obj->base.dev)->gen == 3) { - if (obj->gtt_space->start & ~I915_FENCE_START_MASK) + if (i915_gem_obj_offset(obj) & ~I915_FENCE_START_MASK) return false; } else { - if (obj->gtt_space->start & ~I830_FENCE_START_MASK) + if (i915_gem_obj_offset(obj) & ~I830_FENCE_START_MASK) return false; } size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); - if (obj->gtt_space->size != size) + if 
(i915_gem_obj_size(obj) != size) return false; - if (obj->gtt_space->start & (size - 1)) + if (i915_gem_obj_offset(obj) & (size - 1)) return false; return true; @@ -358,8 +358,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, * whilst executing a fenced command for an untiled object. */ - obj->map_and_fenceable = obj->gtt_space == NULL || - (obj->gtt_space->start + + obj->map_and_fenceable = !i915_gem_obj_bound(obj) || + (i915_gem_obj_offset(obj) + obj->base.size <= dev_priv->gtt.mappable_end && i915_gem_object_fence_ok(obj, args->tiling_mode)); @@ -369,7 +369,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, i915_gem_get_gtt_alignment(dev, obj->base.size, args->tiling_mode, false); - if (obj->gtt_space->start & (unfenced_alignment - 1)) + if (i915_gem_obj_offset(obj) & (unfenced_alignment - 1)) ret = i915_gem_object_unbind(obj); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 2c4fe36..c0be641 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1512,7 +1512,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, if (dst == NULL) return NULL; - reloc_offset = src->gtt_space->start; + reloc_offset = i915_gem_obj_offset(src); for (i = 0; i < num_pages; i++) { unsigned long flags; void *d; @@ -1564,7 +1564,7 @@ i915_error_object_create_sized(struct drm_i915_private *dev_priv, reloc_offset += PAGE_SIZE; } dst->page_count = num_pages; - dst->gtt_offset = src->gtt_space->start; + dst->gtt_offset = i915_gem_obj_offset(src); return dst; @@ -1618,7 +1618,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->name = obj->base.name; err->rseqno = obj->last_read_seqno; err->wseqno = obj->last_write_seqno; - err->gtt_offset = obj->gtt_space->start; + err->gtt_offset = i915_gem_obj_offset(obj); err->read_domains = obj->base.read_domains; err->write_domain = obj->base.write_domain; err->fence_reg = obj->fence_reg; @@ -1716,8 +1716,8 @@ 
i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, return NULL; obj = ring->private; - if (acthd >= obj->gtt_space->start && - acthd < obj->gtt_space->start + obj->base.size) + if (acthd >= i915_gem_obj_offset(obj) && + acthd < i915_gem_obj_offset(obj) + obj->base.size) return i915_error_object_create(dev_priv, obj); } @@ -1798,7 +1798,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring, return; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if ((error->ccid & PAGE_MASK) == obj->gtt_space->start) { + if ((error->ccid & PAGE_MASK) == i915_gem_obj_offset(obj)) { ering->ctx = i915_error_object_create_sized(dev_priv, obj, 1); } @@ -2152,10 +2152,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in if (INTEL_INFO(dev)->gen >= 4) { int dspsurf = DSPSURF(intel_crtc->plane); stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == - obj->gtt_space->start; + i915_gem_obj_offset(obj); } else { int dspaddr = DSPADDR(intel_crtc->plane); - stall_detected = I915_READ(dspaddr) == (obj->gtt_space->start + + stall_detected = I915_READ(dspaddr) == (i915_gem_obj_offset(obj) + crtc->y * crtc->fb->pitches[0] + crtc->x * crtc->fb->bits_per_pixel/8); } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 3db4a68..e4dccb3 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -46,8 +46,8 @@ TRACE_EVENT(i915_gem_object_bind, TP_fast_assign( __entry->obj = obj; - __entry->offset = obj->gtt_space->start; - __entry->size = obj->gtt_space->size; + __entry->offset = i915_gem_obj_offset(obj); + __entry->size = i915_gem_obj_size(obj); __entry->mappable = mappable; ), @@ -68,8 +68,8 @@ TRACE_EVENT(i915_gem_object_unbind, TP_fast_assign( __entry->obj = obj; - __entry->offset = obj->gtt_space->start; - __entry->size = obj->gtt_space->size; + __entry->offset = i915_gem_obj_offset(obj); + __entry->size = i915_gem_obj_size(obj); ), 
TP_printk("obj=%p, offset=%08x size=%x", diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a269d7a..633bfbf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1943,18 +1943,18 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, } DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - obj->gtt_space->start, linear_offset, x, y, + i915_gem_obj_offset(obj), linear_offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { I915_MODIFY_DISPBASE(DSPSURF(plane), - obj->gtt_space->start + + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); } else I915_WRITE(DSPADDR(plane), - obj->gtt_space->start + linear_offset); + i915_gem_obj_offset(obj) + linear_offset); POSTING_READ(reg); return 0; @@ -2035,11 +2035,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc, linear_offset -= intel_crtc->dspaddr_offset; DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - obj->gtt_space->start, linear_offset, x, y, + i915_gem_obj_offset(obj), linear_offset, x, y, fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_MODIFY_DISPBASE(DSPSURF(plane), - obj->gtt_space->start+intel_crtc->dspaddr_offset); + i915_gem_obj_offset(obj)+intel_crtc->dspaddr_offset); if (IS_HASWELL(dev)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); } else { @@ -6558,7 +6558,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, goto fail_unpin; } - addr = obj->gtt_space->start; + addr = i915_gem_obj_offset(obj); } else { int align = IS_I830(dev) ? 
16 * 1024 : 256; ret = i915_gem_attach_phys_object(dev, obj, @@ -7274,7 +7274,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, - obj->gtt_space->start + intel_crtc->dspaddr_offset); + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, 0); /* aux display base address, unused */ intel_mark_page_flip_active(intel_crtc); @@ -7316,7 +7316,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, - obj->gtt_space->start + intel_crtc->dspaddr_offset); + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, MI_NOOP); intel_mark_page_flip_active(intel_crtc); @@ -7356,7 +7356,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0]); intel_ring_emit(ring, - (obj->gtt_space->start + intel_crtc->dspaddr_offset) | + (i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset) | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -7400,7 +7400,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); intel_ring_emit(ring, - obj->gtt_space->start + intel_crtc->dspaddr_offset); + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -7466,7 +7466,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); intel_ring_emit(ring, - obj->gtt_space->start + intel_crtc->dspaddr_offset); + i915_gem_obj_offset(obj) + intel_crtc->dspaddr_offset); intel_ring_emit(ring, 
(MI_NOOP)); intel_mark_page_flip_active(intel_crtc); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 242a793..8315a5e 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper, info->apertures->ranges[0].base = dev->mode_config.fb_base; info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; - info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_space->start; + info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_offset(obj); info->fix.smem_len = size; info->screen_base = - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_space->start, + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_offset(obj), size); if (!info->screen_base) { ret = -ENOSPC; @@ -168,7 +168,7 @@ static int intelfb_create(struct drm_fb_helper *helper, DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width, fb->height, - obj->gtt_space->start, obj); + i915_gem_obj_offset(obj), obj); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 93f2671..41654b1 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; else regs = io_mapping_map_wc(dev_priv->gtt.mappable, - overlay->reg_bo->gtt_space->start); + i915_gem_obj_offset(overlay->reg_bo)); return regs; } @@ -740,7 +740,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, swidth = params->src_w; swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); sheight = params->src_h; - iowrite32(new_bo->gtt_space->start + params->offset_Y, ®s->OBUF_0Y); + iowrite32(i915_gem_obj_offset(new_bo) + params->offset_Y, + ®s->OBUF_0Y); ostride = params->stride_Y; if (params->format & 
I915_OVERLAY_YUV_PLANAR) { @@ -754,9 +755,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, params->src_w/uv_hscale); swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; sheight |= (params->src_h/uv_vscale) << 16; - iowrite32(new_bo->gtt_space->start + params->offset_U, + iowrite32(i915_gem_obj_offset(new_bo) + params->offset_U, ®s->OBUF_0U); - iowrite32(new_bo->gtt_space->start + params->offset_V, + iowrite32(i915_gem_obj_offset(new_bo) + params->offset_V, ®s->OBUF_0V); ostride |= params->stride_UV << 16; } @@ -1357,7 +1358,7 @@ void intel_setup_overlay(struct drm_device *dev) DRM_ERROR("failed to pin overlay register bo\n"); goto out_free_bo; } - overlay->flip_addr = reg_bo->gtt_space->start; + overlay->flip_addr = i915_gem_obj_offset(reg_bo); ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); if (ret) { @@ -1436,7 +1437,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) overlay->reg_bo->phys_obj->handle->vaddr; return io_mapping_map_atomic_wc(dev_priv->gtt.mappable, - overlay->reg_bo->gtt_space->start); + i915_gem_obj_offset(overlay->reg_bo)); } static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, @@ -1467,7 +1468,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; else - error->base = overlay->reg_bo->gtt_space->start; + error->base = i915_gem_obj_offset(overlay->reg_bo); regs = intel_overlay_map_regs_atomic(overlay); if (!regs) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 73c0ee1..504d96b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -217,7 +217,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); - I915_WRITE(ILK_FBC_RT_BASE, 
obj->gtt_space->start | ILK_FBC_RT_VALID); + I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_offset(obj) | ILK_FBC_RT_VALID); /* enable it... */ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -3685,7 +3685,7 @@ static void ironlake_enable_rc6(struct drm_device *dev) intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_space->start | + intel_ring_emit(ring, i915_gem_obj_offset(dev_priv->ips.renderctx) | MI_MM_SPACE_GTT | MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN | @@ -3708,7 +3708,8 @@ static void ironlake_enable_rc6(struct drm_device *dev) return; } - I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_space->start | PWRCTX_EN); + I915_WRITE(PWRCTXA, i915_gem_obj_offset(dev_priv->ips.pwrctx) | + PWRCTX_EN); I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index c4c80c2..64b579f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -424,14 +424,14 @@ static int init_ring_common(struct intel_ring_buffer *ring) * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. 
*/ - I915_WRITE_START(ring, obj->gtt_space->start); + I915_WRITE_START(ring, i915_gem_obj_offset(obj)); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); /* If the head is still not zero, the ring is dead */ if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && - I915_READ_START(ring) == obj->gtt_space->start && + I915_READ_START(ring) == i915_gem_obj_offset(obj) && (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x head %08x tail %08x start %08x\n", @@ -489,7 +489,7 @@ init_pipe_control(struct intel_ring_buffer *ring) if (ret) goto err_unref; - pc->gtt_offset = obj->gtt_space->start; + pc->gtt_offset = i915_gem_obj_offset(obj); pc->cpu_page = kmap(sg_page(obj->pages->sgl)); if (pc->cpu_page == NULL) { ret = -ENOMEM; @@ -1129,7 +1129,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring, intel_ring_advance(ring); } else { struct drm_i915_gem_object *obj = ring->private; - u32 cs_offset = obj->gtt_space->start; + u32 cs_offset = i915_gem_obj_offset(obj); if (len > I830_BATCH_LIMIT) return -ENOSPC; @@ -1214,7 +1214,7 @@ static int init_status_page(struct intel_ring_buffer *ring) goto err_unref; } - ring->status_page.gfx_addr = obj->gtt_space->start; + ring->status_page.gfx_addr = i915_gem_obj_offset(obj); ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); if (ring->status_page.page_addr == NULL) { ret = -ENOMEM; @@ -1308,7 +1308,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, goto err_unpin; ring->virtual_start = - ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_space->start, + ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_offset(obj), ring->size); if (ring->virtual_start == NULL) { DRM_ERROR("Failed to map ringbuffer.\n"); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index c342571..117a2f8 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -133,7 +133,7 @@ 
vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb, I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(SPCNTR(pipe, plane), sprctl); - I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_space->start + + I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_offset(obj) + sprsurf_offset); POSTING_READ(SPSURF(pipe, plane)); } @@ -309,7 +309,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(SPRSCALE(pipe), sprscale); I915_WRITE(SPRCTL(pipe), sprctl); I915_MODIFY_DISPBASE(SPRSURF(pipe), - obj->gtt_space->start + sprsurf_offset); + i915_gem_obj_offset(obj) + sprsurf_offset); POSTING_READ(SPRSURF(pipe)); /* potentially re-enable LP watermarks */ @@ -480,7 +480,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, I915_WRITE(DVSSCALE(pipe), dvsscale); I915_WRITE(DVSCNTR(pipe), dvscntr); I915_MODIFY_DISPBASE(DVSSURF(pipe), - obj->gtt_space->start + dvssurf_offset); + i915_gem_obj_offset(obj) + dvssurf_offset); POSTING_READ(DVSSURF(pipe)); }
This will be handy when we add VMs. It's not strictly necessary, but it will make the code much cleaner. Signed-off-by: Ben Widawsky <ben@bwidawsk.net> --- drivers/gpu/drm/i915/i915_debugfs.c | 26 +++++------ drivers/gpu/drm/i915/i915_drv.h | 21 +++++++++ drivers/gpu/drm/i915/i915_gem.c | 69 +++++++++++++++--------------- drivers/gpu/drm/i915/i915_gem_context.c | 2 +- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 20 +++++---- drivers/gpu/drm/i915/i915_gem_gtt.c | 12 +++--- drivers/gpu/drm/i915/i915_gem_tiling.c | 14 +++--- drivers/gpu/drm/i915/i915_irq.c | 16 +++---- drivers/gpu/drm/i915/i915_trace.h | 8 ++-- drivers/gpu/drm/i915/intel_display.c | 22 +++++----- drivers/gpu/drm/i915/intel_fb.c | 6 +-- drivers/gpu/drm/i915/intel_overlay.c | 15 ++++--- drivers/gpu/drm/i915/intel_pm.c | 7 +-- drivers/gpu/drm/i915/intel_ringbuffer.c | 12 +++--- drivers/gpu/drm/i915/intel_sprite.c | 6 +-- 15 files changed, 143 insertions(+), 113 deletions(-)