@@ -142,7 +142,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
if (unlikely(r))
goto err_free;
- node->base.start = node->mm_nodes[0].start >> PAGE_SHIFT;
+ node->base.start = node->mm_nodes[0].start;
} else {
node->mm_nodes[0].start = 0;
node->mm_nodes[0].size = node->base.size;
@@ -1490,8 +1490,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint64_t offset;
- offset = (bo->tbo.resource->start << PAGE_SHIFT) +
- amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
+ offset = amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
+ offset += bo->tbo.resource->start;
return amdgpu_gmc_sign_extend(offset);
}
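
The hunk above makes the GPU offset a plain byte sum of the domain base and the resource start, with no page shift on either side. A minimal standalone sketch of the arithmetic; the domain base and start value are invented for illustration, and the real base comes from amdgpu_ttm_domain_start():

#include <stdint.h>
#include <stdio.h>

/* invented domain base; the real one is hardware-specific */
#define DOMAIN_START 0x0000008000000000ULL

int main(void)
{
	uint64_t start = 0x200000;              /* resource->start, in bytes */
	uint64_t offset = DOMAIN_START + start; /* no << PAGE_SHIFT */

	printf("gpu offset: 0x%llx\n", (unsigned long long)offset);
	return 0;
}
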
@@ -569,7 +569,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
case AMDGPU_PL_PREEMPT:
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start;
/* check if it's visible */
if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
@@ -926,7 +926,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
- bo->resource->start = addr >> PAGE_SHIFT;
+ bo->resource->start = addr;
return 0;
}
@@ -527,14 +527,13 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
vres->base.start = 0;
list_for_each_entry(block, &vres->blocks, link) {
- unsigned long start;
+ u64 start;
start = amdgpu_vram_mgr_block_start(block) +
amdgpu_vram_mgr_block_size(block);
- start >>= PAGE_SHIFT;
- if (start > PFN_UP(vres->base.size))
- start -= PFN_UP(vres->base.size);
+ if (start > vres->base.size)
+ start -= vres->base.size;
else
start = 0;
vres->base.start = max(vres->base.start, start);
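
The loop above computes base.start as a conservative watermark: for each block, take its end offset minus the resource size (clamped at zero) and keep the maximum, so base.start + size reproduces the highest byte any block touches. A standalone sketch with an invented two-block layout:

#include <stdint.h>
#include <stdio.h>

#define MiB (1024ULL * 1024ULL)

int main(void)
{
	uint64_t block_end[] = { 1 * MiB, 5 * MiB }; /* invented block ends */
	uint64_t size = 2 * MiB;                     /* vres->base.size */
	uint64_t base_start = 0;

	for (int i = 0; i < 2; i++) {
		uint64_t start = block_end[i];

		if (start > size)
			start -= size;
		else
			start = 0;
		if (start > base_start)
			base_start = start;
	}
	/* base_start is 3 MiB, so base_start + size hits the 5 MiB high mark */
	printf("base.start = %llu MiB\n", (unsigned long long)(base_start / MiB));
	return 0;
}

Downstream visibility checks of the form start + size <= visible_vram_size therefore stay correct without knowing the individual blocks.
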
@@ -249,16 +249,6 @@ void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_put);
-static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
-{
- /* Keep TTM behavior for now, remove when drivers are audited */
- if (WARN_ON_ONCE(!gbo->bo.resource ||
- gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
- return 0;
-
- return gbo->bo.resource->start;
-}
-
/**
* drm_gem_vram_offset() - \
Returns a GEM VRAM object's offset in video memory
@@ -275,7 +265,13 @@ s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
if (WARN_ON_ONCE(!gbo->bo.pin_count))
return (s64)-ENODEV;
- return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
+
+ /* Keep TTM behavior for now, remove when drivers are audited */
+ if (WARN_ON_ONCE(!gbo->bo.resource ||
+ gbo->bo.resource->mem_type == TTM_PL_SYSTEM))
+ return 0;
+
+ return gbo->bo.resource->start;
}
EXPORT_SYMBOL(drm_gem_vram_offset);
@@ -946,7 +946,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
}
if (new_reg)
- nvbo->offset = (new_reg->start << PAGE_SHIFT);
+ nvbo->offset = new_reg->start;
}
@@ -957,7 +957,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- u64 offset = new_reg->start << PAGE_SHIFT;
+ u64 offset = new_reg->start;
*new_tile = NULL;
if (new_reg->mem_type != TTM_PL_VRAM)
@@ -1118,8 +1118,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- reg->bus.offset = (reg->start << PAGE_SHIFT) +
- drm->agp.base;
+ reg->bus.offset = reg->start + drm->agp.base;
reg->bus.is_iomem = !drm->agp.cma;
reg->bus.caching = ttm_write_combined;
}
@@ -1132,7 +1131,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
}
fallthrough; /* tiled memory */
case TTM_PL_VRAM:
- reg->bus.offset = (reg->start << PAGE_SHIFT) +
+ reg->bus.offset = reg->start +
device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
@@ -1222,7 +1221,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_device *device = nvxx_device(&drm->client.device);
- u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
+ u64 mappable = device->func->resource_size(device, 1);
int i, ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
@@ -1241,7 +1240,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
} else {
/* make sure bo is in mappable vram */
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
- bo->resource->start + PFN_UP(bo->resource->size) < mappable)
+ bo->resource->start + bo->resource->size < mappable)
return 0;
for (i = 0; i < nvbo->placement.num_placement; ++i) {
@@ -49,9 +49,9 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
{
struct nvif_push *push = chan->chan.push;
u32 src_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, old_reg);
- u32 src_offset = old_reg->start << PAGE_SHIFT;
+ u32 src_offset = old_reg->start;
u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg);
- u32 dst_offset = new_reg->start << PAGE_SHIFT;
+ u32 dst_offset = new_reg->start;
u32 page_count = PFN_UP(new_reg->size);
int ret;
@@ -158,7 +158,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page)
}
mutex_unlock(&drm->master.lock);
- reg->start = mem->mem.addr >> PAGE_SHIFT;
+ reg->start = mem->mem.addr;
return ret;
}
@@ -197,8 +197,8 @@ nouveau_mem_intersects(struct ttm_resource *res,
u32 num_pages = PFN_UP(size);
/* Don't evict BOs outside of the requested placement range */
- if (place->fpfn >= (res->start + num_pages) ||
- (place->lpfn && place->lpfn <= res->start))
+ if (place->fpfn >= ((res->start >> PAGE_SHIFT) + num_pages) ||
+ (place->lpfn && place->lpfn <= (res->start >> PAGE_SHIFT)))
return false;
return true;
@@ -211,8 +211,8 @@ nouveau_mem_compatible(struct ttm_resource *res,
{
u32 num_pages = PFN_UP(size);
- if (res->start < place->fpfn ||
- (place->lpfn && (res->start + num_pages) > place->lpfn))
+ if ((res->start >> PAGE_SHIFT) < place->fpfn ||
+ (place->lpfn && ((res->start >> PAGE_SHIFT) + num_pages) > place->lpfn))
return false;
return true;
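
These two hunks are where the units deliberately mix: place->fpfn and place->lpfn stay page frame numbers, so the byte-based res->start is shifted down at the comparison. A compileable sketch of the compatibility test under that assumption; PAGE_SHIFT and the sample values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_UP(x) (((x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

static bool mem_compatible(uint64_t start, uint64_t size,
			   uint32_t fpfn, uint32_t lpfn)
{
	uint32_t num_pages = PFN_UP(size);

	if ((start >> PAGE_SHIFT) < fpfn ||
	    (lpfn && (start >> PAGE_SHIFT) + num_pages > lpfn))
		return false;
	return true;
}

int main(void)
{
	/* a 64 KiB resource at 1 MiB against a [0, 512) page window */
	printf("compatible: %d\n", mem_compatible(1ULL << 20, 64 << 10, 0, 512));
	return 0;
}
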
@@ -145,7 +145,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
return ret;
}
- (*res)->start = mem->vma[0].addr >> PAGE_SHIFT;
+ (*res)->start = mem->vma[0].addr;
return 0;
}
@@ -79,7 +79,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_priv *priv = chan->drm->fence;
struct ttm_resource *reg = priv->bo->bo.resource;
struct nv10_fence_chan *fctx;
- u32 start = reg->start * PAGE_SIZE;
+ u32 start = reg->start;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret = 0;
@@ -38,7 +38,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_priv *priv = chan->drm->fence;
struct nv10_fence_chan *fctx;
struct ttm_resource *reg = priv->bo->bo.resource;
- u32 start = reg->start * PAGE_SIZE;
+ u32 start = reg->start;
u32 limit = start + priv->bo->bo.base.size - 1;
int ret;
@@ -289,7 +289,7 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
/* TODO - need to hold one of the locks to read bo->tbo.resource->start */
- return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset);
+ return slot->high_bits | (bo->tbo.resource->start + offset);
}
/* qxl_display.c */
@@ -220,7 +220,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
else
goto fallback;
- offset = bo->tbo.resource->start << PAGE_SHIFT;
+ offset = bo->tbo.resource->start;
return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
if (bo->kptr) {
@@ -81,13 +81,12 @@ int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
return 0;
case TTM_PL_VRAM:
mem->bus.is_iomem = true;
- mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;
+ mem->bus.offset = mem->start + qdev->vram_base;
mem->bus.caching = ttm_write_combined;
break;
case TTM_PL_PRIV:
mem->bus.is_iomem = true;
- mem->bus.offset = (mem->start << PAGE_SHIFT) +
- qdev->surfaceram_base;
+ mem->bus.offset = mem->start + qdev->surfaceram_base;
mem->bus.caching = ttm_write_combined;
break;
default:
@@ -588,7 +588,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
out:
radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
- bo->tbo.resource->start << PAGE_SHIFT,
+ bo->tbo.resource->start,
bo->tbo.base.size);
return 0;
}
@@ -738,7 +738,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
size = bo->resource->size;
- offset = bo->resource->start << PAGE_SHIFT;
+ offset = bo->resource->start;
if ((offset + size) <= rdev->mc.visible_vram_size)
return 0;
@@ -760,7 +760,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(bo, &rbo->placement, &ctx);
} else if (likely(!r)) {
- offset = bo->resource->start << PAGE_SHIFT;
+ offset = bo->resource->start;
/* this should never happen */
if ((offset + size) > rdev->mc.visible_vram_size)
return VM_FAULT_SIGBUS;
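
The visible-VRAM test in this function now compares bytes end to end. A tiny standalone sketch of the boundary case; the BAR size and the BO placement are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t visible_vram_size = 256ULL << 20; /* invented 256 MiB BAR */
	uint64_t start = 255ULL << 20;             /* resource->start, bytes */
	uint64_t size = 2ULL << 20;                /* resource size, bytes */

	/* same shape as the checks in radeon_bo_fault_reserve_notify() */
	if (start + size > visible_vram_size)
		printf("BO crosses the visible VRAM boundary, must move\n");
	return 0;
}
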
@@ -104,7 +104,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
break;
}
- return (bo->tbo.resource->start << PAGE_SHIFT) + start;
+ return bo->tbo.resource->start + start;
}
static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
@@ -104,7 +104,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
- bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+ bo->resource->start < rbo->rdev->mc.visible_vram_size) {
unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
int i;
@@ -149,8 +149,8 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
rdev = radeon_get_rdev(bo->bdev);
ridx = radeon_copy_ring_index(rdev);
- old_start = (u64)old_mem->start << PAGE_SHIFT;
- new_start = (u64)new_mem->start << PAGE_SHIFT;
+ old_start = old_mem->start;
+ new_start = new_mem->start;
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
@@ -274,15 +274,14 @@ static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resourc
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
/* RADEON_IS_AGP is set only if AGP is active */
- mem->bus.offset = (mem->start << PAGE_SHIFT) +
- rdev->mc.agp_base;
+ mem->bus.offset = mem->start + rdev->mc.agp_base;
mem->bus.is_iomem = !rdev->agp->cant_use_aperture;
mem->bus.caching = ttm_write_combined;
}
#endif
break;
case TTM_PL_VRAM:
- mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start;
/* check if it's visible */
if ((mem->bus.offset + bus_size) > rdev->mc.visible_vram_size)
return -EINVAL;
@@ -443,7 +442,7 @@ static int radeon_ttm_backend_bind(struct ttm_device *bdev,
flags &= ~RADEON_GART_PAGE_WRITE;
}
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+ gtt->offset = bo_mem->start;
if (!ttm->num_pages) {
WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
@@ -945,7 +945,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;
if (mem) {
- addr = (u64)mem->start << PAGE_SHIFT;
+ addr = mem->start;
if (mem->mem_type != TTM_PL_SYSTEM)
bo_va->flags |= RADEON_VM_PAGE_VALID;
@@ -95,7 +95,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
return ret;
}
- node->base.start = node->mm_nodes[0].start >> PAGE_SHIFT;
+ node->base.start = node->mm_nodes[0].start;
*res = &node->base;
return 0;
}
@@ -188,7 +188,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* that situation.
*/
if (bo->resource->mem_type == TTM_PL_VRAM &&
- bo->resource->start < PFN_UP(bo->resource->size) &&
+ bo->resource->start < bo->resource->size &&
bo->resource->start > 0 &&
buf->tbo.pin_count == 0) {
ctx.interruptible = false;
@@ -258,7 +258,7 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
{
if (bo->resource->mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
- ptr->offset = bo->resource->start << PAGE_SHIFT;
+ ptr->offset = bo->resource->start;
} else {
ptr->gmrId = bo->resource->start;
ptr->offset = 0;
@@ -584,7 +584,7 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
if (bo->resource->mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
- cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
+ cmd->body.guestResult.offset = bo->resource->start;
} else {
cmd->body.guestResult.gmrId = bo->resource->start;
cmd->body.guestResult.offset = 0;
@@ -3764,7 +3764,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
bo = &reloc->vbo->tbo;
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
- reloc->location->offset += bo->resource->start << PAGE_SHIFT;
+ reloc->location->offset += bo->resource->start;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
break;
case VMW_PL_GMR:
@@ -462,8 +462,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *
case VMW_PL_MOB:
return 0;
case TTM_PL_VRAM:
- mem->bus.offset = (mem->start << PAGE_SHIFT) +
- dev_priv->vram_start;
+ mem->bus.offset = mem->start + dev_priv->vram_start;
mem->bus.is_iomem = true;
mem->bus.caching = ttm_cached;
break;
@@ -207,7 +207,7 @@ struct ttm_bus_placement {
* buffer object.
*/
struct ttm_resource {
- unsigned long start;
+ uint64_t start;
size_t size;
uint32_t mem_type;
uint32_t placement;
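
Taken together, the invariant this struct change encodes for the whole series: start is a byte offset into the resource's memory domain, while placement limits (fpfn, lpfn) remain page frame numbers. A standalone sketch of the two unit systems side by side; the helper names are mine, not part of TTM, and PAGE_SHIFT is assumed to be 12:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* hypothetical helpers, for illustration only */
static uint64_t bus_offset(uint64_t vram_base, uint64_t start)
{
	return vram_base + start;   /* bytes plus bytes, no shift */
}

static uint64_t start_pfn(uint64_t start)
{
	return start >> PAGE_SHIFT; /* only when comparing against fpfn/lpfn */
}

int main(void)
{
	uint64_t start = 3ULL << 20; /* resource->start of 3 MiB */

	printf("bus offset: 0x%llx\n",
	       (unsigned long long)bus_offset(0xf0000000ULL, start));
	printf("pfn: %llu\n", (unsigned long long)start_pfn(start));
	return 0;
}
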