@@ -278,7 +278,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Our framebuffer is the entirety of fbdev's system memory */
info->fix.smem_start =
- (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
info->fix.smem_len = vma->size;
}
@@ -379,22 +379,25 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
const struct i915_vma *vma,
unsigned int flags)
{
- if (vma->node.size < entry->pad_to_size)
+ const u64 start = i915_vma_offset(vma);
+ const u64 size = i915_vma_size(vma);
+
+ if (size < entry->pad_to_size)
return true;
- if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
+ if (entry->alignment && !IS_ALIGNED(start, entry->alignment))
return true;
if (flags & EXEC_OBJECT_PINNED &&
- vma->node.start != entry->offset)
+ start != entry->offset)
return true;
if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
- vma->node.start < BATCH_OFFSET_BIAS)
+ start < BATCH_OFFSET_BIAS)
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size + 4095) >> 32)
+ (start + size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
@@ -440,7 +443,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
int err;
if (vma->node.size)
- pin_flags = vma->node.start;
+ pin_flags = __i915_vma_offset(vma);
else
pin_flags = entry->offset & PIN_OFFSET_MASK;
@@ -674,8 +677,8 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
if (err)
return err;
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
+ if (entry->offset != i915_vma_offset(vma)) {
+ entry->offset = i915_vma_offset(vma) | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
@@ -995,8 +998,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
return err;
if (!err) {
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
+ if (entry->offset != i915_vma_offset(vma)) {
+ entry->offset = i915_vma_offset(vma) | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
@@ -1078,7 +1081,7 @@ static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
const struct i915_vma *target)
{
- return gen8_canonical_addr((int)reloc->delta + target->node.start);
+ return gen8_canonical_addr((int)reloc->delta + i915_vma_offset(target));
}
static void reloc_cache_init(struct reloc_cache *cache,
@@ -1289,7 +1292,7 @@ static void *reloc_iomap(struct i915_vma *batch,
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
- cache->node.start = vma->node.start;
+ cache->node.start = i915_ggtt_offset(vma);
cache->node.mm = (void *)vma;
}
}
@@ -1452,7 +1455,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+ gen8_canonical_addr(i915_vma_offset(target->vma)) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@@ -2373,7 +2376,7 @@ static int eb_request_submit(struct i915_execbuffer *eb,
}
err = rq->context->engine->emit_bb_start(rq,
- batch->node.start +
+ i915_vma_offset(batch) +
eb->batch_start_offset,
batch_len,
eb->batch_flags);
@@ -2384,7 +2387,7 @@ static int eb_request_submit(struct i915_execbuffer *eb,
GEM_BUG_ON(intel_context_is_parallel(rq->context));
GEM_BUG_ON(eb->batch_start_offset);
err = rq->context->engine->emit_bb_start(rq,
- eb->trampoline->node.start +
+ i915_vma_offset(eb->trampoline) +
batch_len, 0, 0);
if (err)
return err;
@@ -395,7 +395,7 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
if (ret)
@@ -400,7 +400,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
list_for_each_entry_safe(vma, next,
&to_gt(i915)->ggtt->vm.bound_list, vm_link) {
- unsigned long count = vma->node.size >> PAGE_SHIFT;
+ unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
struct drm_i915_gem_object *obj = vma->obj;
if (!vma->iomap || i915_vma_is_active(vma))
@@ -168,11 +168,11 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma,
return true;
size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
- if (vma->node.size < size)
+ if (i915_vma_size(vma) < size)
return false;
alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
- if (!IS_ALIGNED(vma->node.start, alignment))
+ if (!IS_ALIGNED(i915_ggtt_offset(vma), alignment))
return false;
return true;
@@ -404,7 +404,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
* Maintaining alignment is required to utilise huge pages in the ppGGT.
*/
if (i915_gem_object_is_lmem(obj) &&
- IS_ALIGNED(vma->node.start, SZ_2M) &&
+ IS_ALIGNED(i915_vma_offset(vma), SZ_2M) &&
vma->page_sizes.sg & SZ_2M &&
vma->resource->page_sizes_gtt < SZ_2M) {
pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
@@ -96,14 +96,14 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
*cs++ = 0;
*cs++ = t->height << 16 | t->width;
- *cs++ = lower_32_bits(dst->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
if (use_64b_reloc)
- *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
*cs++ = 0;
*cs++ = src_pitch;
- *cs++ = lower_32_bits(src->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(src->vma));
if (use_64b_reloc)
- *cs++ = upper_32_bits(src->vma->node.start);
+ *cs++ = upper_32_bits(i915_vma_offset(src->vma));
*cs++ = MI_BATCH_BUFFER_END;
@@ -319,7 +319,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
{
int err;
- if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
+ if (drm_mm_node_allocated(&vma->node) && i915_vma_offset(vma) != addr) {
err = i915_vma_unbind_unlocked(vma);
if (err)
return err;
@@ -329,6 +329,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
if (err)
return err;
+ GEM_BUG_ON(i915_vma_offset(vma) != addr);
return 0;
}
@@ -375,8 +376,8 @@ tiled_blit(struct tiled_blits *t,
err = move_to_active(dst->vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- t->batch->node.start,
- t->batch->node.size,
+ i915_vma_offset(t->batch),
+ i915_vma_size(t->batch),
0);
i915_request_get(rq);
i915_request_add(rq);
@@ -900,8 +900,8 @@ static int rpcs_query_batch(struct drm_i915_gem_object *rpcs,
*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE(engine->mmio_base));
- *cmd++ = lower_32_bits(vma->node.start);
- *cmd++ = upper_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
+ *cmd++ = upper_32_bits(i915_vma_offset(vma));
*cmd = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(rpcs, 0, 64);
@@ -989,7 +989,8 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
}
err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
0);
if (err)
goto skip_request;
@@ -1552,7 +1553,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto skip_request;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+ i915_vma_size(vma), 0);
if (err)
goto skip_request;
@@ -1657,7 +1659,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
*cmd++ = offset;
*cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
*cmd++ = reg;
- *cmd++ = vma->node.start + result;
+ *cmd++ = i915_vma_offset(vma) + result;
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
@@ -1688,7 +1690,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto skip_request;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+ i915_vma_size(vma), flags);
if (err)
goto skip_request;
@@ -1534,7 +1534,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
if (err == 0)
err = i915_vma_move_to_active(vma, rq, 0);
- err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
i915_request_get(rq);
i915_request_add(rq);
@@ -62,8 +62,8 @@ igt_emit_store_dw(struct i915_vma *vma,
goto err;
}
- GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
- offset += vma->node.start;
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
+ offset += i915_vma_offset(vma);
for (n = 0; n < count; n++) {
if (ver >= 8) {
@@ -151,7 +151,8 @@ int igt_gpu_fill_dw(struct intel_context *ce,
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
flags);
skip_request:
@@ -106,7 +106,7 @@ static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
static u32 batch_addr(const struct batch_chunk *bc)
{
- return bc->vma->node.start;
+ return i915_vma_offset(bc->vma);
}
static void batch_add(struct batch_chunk *bc, const u32 d)
@@ -220,7 +220,8 @@ static int fence_update(struct i915_fence_reg *fence,
return ret;
}
- fence->start = vma->node.start;
+ GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
+ fence->start = i915_ggtt_offset(vma);
fence->size = vma->fence_size;
fence->stride = i915_gem_object_get_stride(vma->obj);
fence->tiling = i915_gem_object_get_tiling(vma->obj);
@@ -63,7 +63,7 @@ static int render_state_setup(struct intel_renderstate *so,
u32 s = rodata->batch[i];
if (i * 4 == rodata->reloc[reloc_index]) {
- u64 r = s + so->vma->node.start;
+ u64 r = s + i915_vma_offset(so->vma);
s = lower_32_bits(r);
if (HAS_64BIT_RELOC(i915)) {
@@ -892,7 +892,7 @@ static int clear_residuals(struct i915_request *rq)
}
ret = engine->emit_bb_start(rq,
- engine->wa_ctx.vma->node.start, 0,
+ i915_vma_offset(engine->wa_ctx.vma), 0,
0);
if (ret)
return ret;
@@ -165,7 +165,7 @@ static int perf_mi_bb_start(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- batch->node.start, 8,
+ i915_vma_offset(batch), 8,
0);
if (err)
goto out;
@@ -305,7 +305,7 @@ static int perf_mi_noop(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- base->node.start, 8,
+ i915_vma_offset(base), 8,
0);
if (err)
goto out;
@@ -315,8 +315,8 @@ static int perf_mi_noop(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- nop->node.start,
- nop->node.size,
+ i915_vma_offset(nop),
+ i915_vma_size(nop),
0);
if (err)
goto out;
@@ -2733,11 +2733,11 @@ static int create_gang(struct intel_engine_cs *engine,
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(vma));
+ *cs++ = upper_32_bits(i915_vma_offset(vma));
if (*prev) {
- u64 offset = (*prev)->batch->node.start;
+ u64 offset = i915_vma_offset((*prev)->batch);
/* Terminate the spinner in the next lower priority batch. */
*cs++ = MI_STORE_DWORD_IMM_GEN4;
@@ -2765,7 +2765,7 @@ static int create_gang(struct intel_engine_cs *engine,
err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_vma_unlock(vma);
i915_request_add(rq);
@@ -3093,7 +3093,7 @@ create_gpr_user(struct intel_engine_cs *engine,
*cs++ = MI_MATH_ADD;
*cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
- addr = result->node.start + offset + i * sizeof(*cs);
+ addr = i915_vma_offset(result) + offset + i * sizeof(*cs);
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = CS_GPR(engine, 2 * i);
*cs++ = lower_32_bits(addr);
@@ -3103,8 +3103,8 @@ create_gpr_user(struct intel_engine_cs *engine,
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD;
*cs++ = i;
- *cs++ = lower_32_bits(result->node.start);
- *cs++ = upper_32_bits(result->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(result));
+ *cs++ = upper_32_bits(i915_vma_offset(result));
}
*cs++ = MI_BATCH_BUFFER_END;
@@ -3188,7 +3188,7 @@ create_gpr_client(struct intel_engine_cs *engine,
err = i915_vma_move_to_active(batch, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- batch->node.start,
+ i915_vma_offset(batch),
PAGE_SIZE, 0);
i915_vma_unlock(batch);
i915_vma_unpin(batch);
@@ -3520,7 +3520,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_vma_unlock(vma);
}
@@ -96,7 +96,8 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
static u64 hws_address(const struct i915_vma *hws,
const struct i915_request *rq)
{
- return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
+ return i915_vma_offset(hws) +
+ offset_in_page(sizeof(u32) * rq->fence.context);
}
static int move_to_active(struct i915_vma *vma,
@@ -196,8 +197,8 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
+ *batch++ = upper_32_bits(i915_vma_offset(vma));
} else if (GRAPHICS_VER(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
@@ -210,7 +211,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
} else if (GRAPHICS_VER(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
@@ -223,7 +224,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
} else {
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -235,7 +236,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
intel_gt_chipset_flush(engine->gt);
@@ -250,7 +251,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
if (GRAPHICS_VER(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+ err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
cancel_rq:
if (err) {
@@ -957,8 +957,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
while (len--) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = hw[dw];
- *cs++ = lower_32_bits(scratch->node.start + x);
- *cs++ = upper_32_bits(scratch->node.start + x);
+ *cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
+ *cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
dw += 2;
x += 4;
@@ -1040,8 +1040,8 @@ record_registers(struct intel_context *ce,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(b_before->node.start);
- *cs++ = upper_32_bits(b_before->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(b_before));
+ *cs++ = upper_32_bits(i915_vma_offset(b_before));
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_SEMAPHORE_WAIT |
@@ -1056,8 +1056,8 @@ record_registers(struct intel_context *ce,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(b_after->node.start);
- *cs++ = upper_32_bits(b_after->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(b_after));
+ *cs++ = upper_32_bits(i915_vma_offset(b_after));
intel_ring_advance(rq, cs);
@@ -1165,8 +1165,8 @@ static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(batch->node.start);
- *cs++ = upper_32_bits(batch->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(batch));
+ *cs++ = upper_32_bits(i915_vma_offset(batch));
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
@@ -50,7 +50,7 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
} else {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
}
- *cs++ = vma->node.start + 4000;
+ *cs++ = i915_vma_offset(vma) + 4000;
*cs++ = STACK_MAGIC;
*cs++ = MI_BATCH_BUFFER_END;
@@ -122,14 +122,14 @@ create_spin_counter(struct intel_engine_cs *engine,
if (srm) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
- *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
- *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+ *cs++ = lower_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
+ *cs++ = upper_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
}
}
*cs++ = MI_BATCH_BUFFER_START_GEN8;
- *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
- *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+ *cs++ = lower_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
+ *cs++ = upper_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
GEM_BUG_ON(cs - base > end);
i915_gem_object_flush_map(obj);
@@ -657,7 +657,7 @@ int live_rps_frequency_cs(void *arg)
err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_request_add(rq);
if (err)
@@ -798,7 +798,7 @@ int live_rps_frequency_srm(void *arg)
err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_request_add(rq);
if (err)
@@ -523,7 +523,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
for (i = 0; i < engine->whitelist.count; i++) {
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
struct i915_gem_ww_ctx ww;
- u64 addr = scratch->node.start;
+ u64 addr = i915_vma_offset(scratch);
struct i915_request *rq;
u32 srm, lrm, rsvd;
u32 expect;
@@ -646,7 +646,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
goto err_request;
err = engine->emit_bb_start(rq,
- batch->node.start, PAGE_SIZE,
+ i915_vma_offset(batch), PAGE_SIZE,
0);
if (err)
goto err_request;
@@ -878,7 +878,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
}
for (i = 0; i < engine->whitelist.count; i++) {
- u64 offset = results->node.start + sizeof(u32) * i;
+ u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
/* Clear non priv flags */
@@ -952,7 +952,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
goto err_request;
/* Perform the writes from an unprivileged "user" batch */
- err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);
err_request:
err = request_add_sync(rq, err);
@@ -1471,8 +1471,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
/* Defer failure until attempted use */
jump_whitelist = alloc_whitelist(batch_length);
- shadow_addr = gen8_canonical_addr(shadow->node.start);
- batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
+ shadow_addr = gen8_canonical_addr(i915_vma_offset(shadow));
+ batch_addr = gen8_canonical_addr(i915_vma_offset(batch) + batch_offset);
/*
* We use the batch length as size because the shadow object is as
@@ -183,7 +183,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
stringify_vma_type(vma),
- vma->node.start, vma->node.size,
+ i915_vma_offset(vma), i915_vma_size(vma),
stringify_page_sizes(vma->resource->page_sizes_gtt,
NULL, 0));
if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
@@ -2021,7 +2021,7 @@ emit_oa_config(struct i915_perf_stream *stream,
goto err_add_request;
err = rq->engine->emit_bb_start(rq,
- vma->node.start, 0,
+ i915_vma_offset(vma), 0,
I915_DISPATCH_SECURE);
if (err)
goto err_add_request;
@@ -407,8 +407,8 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
obj->mm.rsgt, i915_gem_object_is_readonly(obj),
i915_gem_object_is_lmem(obj), obj->mm.region,
- vma->ops, vma->private, vma->node.start,
- vma->node.size, vma->size);
+ vma->ops, vma->private, __i915_vma_offset(vma),
+ __i915_vma_size(vma), vma->size);
}
/**
@@ -436,7 +436,7 @@ int i915_vma_bind(struct i915_vma *vma,
lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(vma->size > vma->node.size);
+ GEM_BUG_ON(vma->size > i915_vma_size(vma));
if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
@@ -574,8 +574,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
vma->obj->base.size);
else
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
- vma->node.start,
- vma->node.size);
+ i915_vma_offset(vma),
+ i915_vma_size(vma));
if (ptr == NULL) {
err = -ENOMEM;
goto err;
@@ -649,22 +649,22 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
return true;
- if (vma->node.size < size)
+ if (i915_vma_size(vma) < size)
return true;
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
- if (alignment && !IS_ALIGNED(vma->node.start, alignment))
+ if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
return true;
if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
return true;
if (flags & PIN_OFFSET_BIAS &&
- vma->node.start < (flags & PIN_OFFSET_MASK))
+ i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
return true;
if (flags & PIN_OFFSET_FIXED &&
- vma->node.start != (flags & PIN_OFFSET_MASK))
+ i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
return true;
return false;
@@ -677,10 +677,11 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!vma->fence_size);
- fenceable = (vma->node.size >= vma->fence_size &&
- IS_ALIGNED(vma->node.start, vma->fence_alignment));
+ fenceable = (i915_vma_size(vma) >= vma->fence_size &&
+ IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
- mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
+ mappable = i915_ggtt_offset(vma) + vma->fence_size <=
+ i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
@@ -124,13 +124,58 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
return !list_empty(&vma->closed_link);
}
+/* Internal use only. */
+static inline u64 __i915_vma_size(const struct i915_vma *vma)
+{
+ return vma->node.size;
+}
+
+/**
+ * i915_vma_size - Obtain the va range size of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address range size
+ * with padding subtracted.
+ *
+ * Return: The effective virtual address range size.
+ */
+static inline u64 i915_vma_size(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ return __i915_vma_size(vma);
+}
+
+/* Internal use only. */
+static inline u64 __i915_vma_offset(const struct i915_vma *vma)
+{
+ return vma->node.start;
+}
+
+/**
+ * i915_vma_offset - Obtain the va offset of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address offset the GPU
+ * should use to access the bound data.
+ *
+ * Return: The effective virtual address offset.
+ */
+static inline u64 i915_vma_offset(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ return __i915_vma_offset(vma);
+}
+
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(upper_32_bits(vma->node.start));
- GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
- return lower_32_bits(vma->node.start);
+ GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma)));
+ GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma) +
+ i915_vma_size(vma) - 1));
+ return lower_32_bits(i915_vma_offset(vma));
}
static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
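For reference, a minimal usage sketch of the new wrappers (not part of the patch; batch and rq are illustrative placeholders). Callers that need the bound GPU address go through i915_vma_offset()/i915_vma_size(); the unchecked __i915_vma_offset()/__i915_vma_size() variants are reserved for paths where the node may not be allocated yet (e.g. eb_pin_vma() and i915_vma_resource_init_from_vma() above); GGTT-only users that need a 32-bit offset keep using i915_ggtt_offset().

	/* Sketch only, assuming a pinned batch vma: */
	u64 addr = i915_vma_offset(batch);	/* GPU address, padding excluded */
	u64 len  = i915_vma_size(batch);	/* usable range, padding excluded */
	err = rq->engine->emit_bb_start(rq, addr, len, 0);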
@@ -52,8 +52,10 @@ struct i915_page_sizes {
* @mr: The memory region of the object pointed to by the vma.
* @ops: Pointer to the backend i915_vma_ops.
* @private: Bind backend private info.
- * @start: Offset into the address space of bind range start.
- * @node_size: Size of the allocated range manager node.
+ * @start: Offset into the address space of bind range start. Note that
+ * this is after any padding that might have been allocated.
+ * @node_size: Size of the allocated range manager node with padding
+ * subtracted.
* @vma_size: Bind size.
* @page_sizes_gtt: Resulting page sizes from the bind operation.
* @bound_flags: Flags indicating binding status.
@@ -170,8 +172,8 @@ static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
* @mr: The memory region of the object the vma points to.
* @ops: The backend ops.
* @private: Bind backend private info.
- * @start: Offset into the address space of bind range start.
- * @node_size: Size of the allocated range manager node.
+ * @start: Offset into the address space of bind range start after padding.
+ * @node_size: Size of the allocated range manager node minus padding.
* @size: Bind size.
*
* Initializes a vma resource allocated using i915_vma_resource_alloc().
@@ -997,8 +997,8 @@ empty_request(struct intel_engine_cs *engine,
return request;
err = engine->emit_bb_start(request,
- batch->node.start,
- batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
I915_DISPATCH_SECURE);
if (err)
goto out_request;
@@ -1118,14 +1118,14 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (ver >= 8) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *cmd++ = lower_32_bits(vma->node.start);
- *cmd++ = upper_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
+ *cmd++ = upper_32_bits(i915_vma_offset(vma));
} else if (ver >= 6) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
- *cmd++ = lower_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
} else {
*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cmd++ = lower_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
@@ -1209,8 +1209,8 @@ static int live_all_engines(void *arg)
GEM_BUG_ON(err);
err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
0);
GEM_BUG_ON(err);
request[idx]->batch = batch;
@@ -1339,8 +1339,8 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err);
err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
0);
GEM_BUG_ON(err);
request[idx]->batch = batch;
@@ -116,7 +116,7 @@ static unsigned int seqno_offset(u64 fence)
static u64 hws_address(const struct i915_vma *hws,
const struct i915_request *rq)
{
- return hws->node.start + seqno_offset(rq->fence.context);
+ return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
}
static int move_to_active(struct i915_vma *vma,
@@ -203,8 +203,8 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = MI_BATCH_BUFFER_START;
else
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
+ *batch++ = upper_32_bits(i915_vma_offset(vma));
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
@@ -219,7 +219,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
flags = 0;
if (GRAPHICS_VER(rq->engine->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
- err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
cancel_rq:
if (err) {