@@ -116,7 +116,6 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_resource **res)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
- uint32_t num_pages = PFN_UP(tbo->base.size);
struct ttm_range_mgr_node *node;
int r;
@@ -134,17 +133,19 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
if (place->lpfn) {
spin_lock(&mgr->lock);
r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
- num_pages, tbo->page_alignment,
- 0, place->fpfn, place->lpfn,
+ tbo->base.size,
+ (u64)tbo->page_alignment << PAGE_SHIFT, 0,
+ (u64)place->fpfn << PAGE_SHIFT,
+ (u64)place->lpfn << PAGE_SHIFT,
DRM_MM_INSERT_BEST);
spin_unlock(&mgr->lock);
if (unlikely(r))
goto err_free;
- node->base.start = node->mm_nodes[0].start;
+ node->base.start = node->mm_nodes[0].start >> PAGE_SHIFT;
} else {
node->mm_nodes[0].start = 0;
- node->mm_nodes[0].size = PFN_UP(node->base.size);
+ node->mm_nodes[0].size = node->base.size;
node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
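
The hunk above moves the GTT manager's drm_mm to byte units: the allocation size comes straight from tbo->base.size, the pfn-based placement limits are widened and shifted up by PAGE_SHIFT before drm_mm sees them, and the resulting byte offset is shifted back down because ttm_resource.start still counts pages. A minimal sketch of that conversion pattern, assuming 'place' and 'node' as in the patch (the helper names are illustrative, not part of the series):

    /* Illustrative helpers only: the conversions the hunk performs inline. */
    static inline u64 place_fpfn_bytes(const struct ttm_place *place)
    {
            return (u64)place->fpfn << PAGE_SHIFT;  /* first byte of the window */
    }

    static inline u64 place_lpfn_bytes(const struct ttm_place *place)
    {
            return (u64)place->lpfn << PAGE_SHIFT;  /* one past the last byte */
    }

    static inline unsigned long node_start_pfn(const struct drm_mm_node *node)
    {
            return node->start >> PAGE_SHIFT;       /* back to pages for ttm_resource */
    }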
@@ -285,8 +286,8 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
- start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
- size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
+ start = (AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS) << PAGE_SHIFT;
+ size = adev->gmc.gart_size - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
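
With the manager initialized in bytes, the transfer-window reservation at the bottom of GART is shifted up front rather than the GART size being shifted down. Assuming the in-tree values of 512 pages per window and 2 windows (see amdgpu_ttm.h) and 4 KiB pages, the arithmetic works out as:

    /* Worked example, not from the patch: 512 pages * 2 windows = 1024
     * pages; shifted by PAGE_SHIFT (12) that is a 4 MiB byte offset, so
     * drm_mm now manages [4 MiB, gart_size) in bytes.
     */
    u64 start = (u64)(512 * 2) << 12;               /* 0x400000 */
    u64 size  = adev->gmc.gart_size - start;        /* remaining GART, bytes */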
@@ -94,8 +94,8 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
- while (start >= node->size << PAGE_SHIFT)
- start -= node++->size << PAGE_SHIFT;
+ while (start >= node->size)
+ start -= node++->size;
- cur->start = (node->start << PAGE_SHIFT) + start;
- cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ cur->start = node->start + start;
+ cur->size = min(node->size - start, size);
cur->remaining = size;
cur->node = node;
break;
@@ -155,8 +155,8 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
node = cur->node;
cur->node = ++node;
- cur->start = node->start << PAGE_SHIFT;
- cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+ cur->start = node->start;
+ cur->size = min(node->size, cur->remaining);
break;
default:
return;
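
With mm_nodes carrying bytes, the cursor seek and advance no longer convert units anywhere. A sketch of the seek logic, mirroring amdgpu_res_first() above (illustrative, not a drop-in):

    /* Illustrative: walk byte-based nodes to reach 'start' bytes into the
     * resource, then clamp the first chunk to what remains of that node.
     */
    static void res_cursor_seek(struct amdgpu_res_cursor *cur,
                                struct drm_mm_node *node, u64 start, u64 size)
    {
            while (start >= node->size)             /* node->size is bytes now */
                    start -= node++->size;

            cur->start = node->start + start;       /* byte address in GART */
            cur->size = min(node->size - start, size);
            cur->remaining = size;
            cur->node = node;
    }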
@@ -94,7 +94,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
if (!rsgt)
return ERR_PTR(-ENOMEM);
- i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
+ i915_refct_sgt_init(rsgt, node->size);
st = &rsgt->table;
/* restricted by sg_alloc_table */
- if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
+ if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, max_segment),
@@ -110,8 +110,8 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
sg = st->sgl;
st->nents = 0;
prev_end = (resource_size_t)-1;
- block_size = node->size << PAGE_SHIFT;
- offset = node->start << PAGE_SHIFT;
+ block_size = node->size;
+ offset = node->start;
while (block_size) {
u64 len;
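
On the i915 side the node now arrives in bytes, so the sg-table sizing and the offset/block_size seeds drop their shifts, and the segment count is derived from max_segment (the per-segment byte cap computed earlier in i915_rsgt_from_mm_node()) rather than segment_pages. A sketch of that sizing, with an illustrative helper name:

    /* Illustrative: number of scatterlist segments needed for a
     * byte-based node, with max_segment the per-segment byte limit.
     */
    static unsigned int rsgt_segment_count(const struct drm_mm_node *node,
                                           u64 max_segment)
    {
            return DIV_ROUND_UP_ULL(node->size, max_segment);
    }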
@@ -64,10 +64,10 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
struct ttm_range_mgr_node *node;
struct drm_mm *mm = &rman->mm;
enum drm_mm_insert_mode mode;
- unsigned long lpfn;
+ u64 lpfn;
int ret;
- lpfn = place->lpfn;
+ lpfn = (u64)place->lpfn << PAGE_SHIFT;
if (!lpfn)
lpfn = man->size;
@@ -83,9 +83,10 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
spin_lock(&rman->lock);
ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
- PFN_UP(node->base.size),
- bo->page_alignment, 0,
- place->fpfn, lpfn, mode);
+ node->base.size,
+ (u64)bo->page_alignment << PAGE_SHIFT, 0,
+ (u64)place->fpfn << PAGE_SHIFT, lpfn,
+ mode);
spin_unlock(&rman->lock);
if (unlikely(ret)) {
@@ -94,7 +95,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
return ret;
}
- node->base.start = node->mm_nodes[0].start;
+ node->base.start = node->mm_nodes[0].start >> PAGE_SHIFT;
*res = &node->base;
return 0;
}
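
The widening casts matter here: place->fpfn and place->lpfn are 32-bit page numbers, so shifting them left by PAGE_SHIFT without first promoting to u64 truncates any placement bound at or above 4 GiB. A minimal sketch of the overflow-safe window computation used above:

    /* Illustrative: compute the byte window for drm_mm, widening before
     * the shift; an unbounded lpfn falls back to the manager size (bytes).
     */
    u64 fpfn = (u64)place->fpfn << PAGE_SHIFT;
    u64 lpfn = place->lpfn ? (u64)place->lpfn << PAGE_SHIFT : man->size;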
@@ -119,11 +120,10 @@ static bool ttm_range_man_intersects(struct ttm_resource_manager *man,
size_t size)
{
struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
- u32 num_pages = PFN_UP(size);
/* Don't evict BOs outside of the requested placement range */
- if (place->fpfn >= (node->start + num_pages) ||
- (place->lpfn && place->lpfn <= node->start))
+ if (((u64)place->fpfn << PAGE_SHIFT) >= (node->start + size) ||
+ (place->lpfn && ((u64)place->lpfn << PAGE_SHIFT) <= node->start))
return false;
return true;
@@ -135,10 +135,10 @@ static bool ttm_range_man_compatible(struct ttm_resource_manager *man,
size_t size)
{
struct drm_mm_node *node = &to_ttm_range_mgr_node(res)->mm_nodes[0];
- u32 num_pages = PFN_UP(size);
- if (node->start < place->fpfn ||
- (place->lpfn && (node->start + num_pages) > place->lpfn))
+ if (node->start < (u64)place->fpfn << PAGE_SHIFT ||
+ (place->lpfn && (node->start + size) >
+ (u64)place->lpfn << PAGE_SHIFT))
return false;
return true;
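
The two predicates above are byte-based versions of the same half-open interval checks: 'intersects' asks whether any part of the node falls inside the placement window, 'compatible' asks whether all of it does. A compact sketch (illustrative, not part of the patch):

    /* Illustrative: node occupies [start, start + size) in bytes;
     * the window is [fpfn, lpfn), with lpfn == 0 meaning unbounded above.
     */
    static bool node_intersects(u64 start, u64 size, u64 fpfn, u64 lpfn)
    {
            return fpfn < start + size && (!lpfn || lpfn > start);
    }

    static bool node_compatible(u64 start, u64 size, u64 fpfn, u64 lpfn)
    {
            return start >= fpfn && (!lpfn || start + size <= lpfn);
    }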