diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -987,7 +987,7 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
spin_lock(bo->bdev->lru_lock);
if (shrinkable) {
/* Try to keep shmem_tt from being considered for shrinking. */
- bo->priority = TTM_MAX_BO_PRIORITY - 1;
+ bo->priority = DRM_MAX_LRU_PRIORITY - 1;
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -63,7 +63,7 @@ int intel_region_to_ttm_type(const struct intel_memory_region *mem)
return TTM_PL_SYSTEM;
type = mem->instance + TTM_PL_PRIV;
- GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
+ GEM_BUG_ON(type >= DRM_NUM_MEM_TYPES);
return type;
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -111,7 +111,7 @@ mock_region_create(struct drm_i915_private *i915,
resource_size_t io_size)
{
int instance = ida_alloc_max(&i915->selftest.mock_region_instances,
- TTM_NUM_MEM_TYPES - TTM_PL_PRIV - 1,
+ DRM_NUM_MEM_TYPES - TTM_PL_PRIV - 1,
GFP_KERNEL);
if (instance < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -148,7 +148,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
int ret;
spin_lock(bdev->lru_lock);
- for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+ for (i = TTM_PL_SYSTEM; i < DRM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
@@ -245,7 +245,7 @@ void ttm_device_fini(struct ttm_device *bdev)
destroy_workqueue(bdev->wq);
spin_lock(bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ for (i = 0; i < DRM_MAX_LRU_PRIORITY; ++i)
if (list_empty(&man->lru[0]))
pr_debug("Swap list %d was clean\n", i);
spin_unlock(bdev->lru_lock);
@@ -287,12 +287,12 @@ void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
ttm_device_clear_lru_dma_mappings(bdev, &bdev->pinned);
- for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+ for (i = TTM_PL_SYSTEM; i < DRM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
continue;
- for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j)
+ for (j = 0; j < DRM_MAX_LRU_PRIORITY; ++j)
ttm_device_clear_lru_dma_mappings(bdev, &man->lru[j]);
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -54,8 +54,8 @@ void ttm_lru_bulk_move_tail(struct ttm_lru_bulk_move *bulk)
{
unsigned i, j;
- for (i = 0; i < TTM_NUM_MEM_TYPES; ++i) {
- for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+ for (i = 0; i < DRM_NUM_MEM_TYPES; ++i) {
+ for (j = 0; j < DRM_MAX_LRU_PRIORITY; ++j) {
struct ttm_lru_bulk_move_pos *pos = &bulk->pos[i][j];
struct ttm_resource_manager *man;
@@ -393,7 +393,7 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
man->size = size;
man->usage = 0;
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ for (i = 0; i < DRM_MAX_LRU_PRIORITY; ++i)
INIT_LIST_HEAD(&man->lru[i]);
man->move = NULL;
}
@@ -425,7 +425,7 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
*/
spin_lock(bdev->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
+ for (i = 0; i < DRM_MAX_LRU_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(bdev->lru_lock);
ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
@@ -504,7 +504,7 @@ ttm_resource_manager_first(struct ttm_resource_manager *man,
lockdep_assert_held(man->bdev->lru_lock);
- for (cursor->priority = 0; cursor->priority < TTM_MAX_BO_PRIORITY;
+ for (cursor->priority = 0; cursor->priority < DRM_MAX_LRU_PRIORITY;
++cursor->priority)
list_for_each_entry(res, &man->lru[cursor->priority], lru)
return res;
@@ -531,7 +531,7 @@ ttm_resource_manager_next(struct ttm_resource_manager *man,
list_for_each_entry_continue(res, &man->lru[cursor->priority], lru)
return res;
- for (++cursor->priority; cursor->priority < TTM_MAX_BO_PRIORITY;
+ for (++cursor->priority; cursor->priority < DRM_MAX_LRU_PRIORITY;
++cursor->priority)
list_for_each_entry(res, &man->lru[cursor->priority], lru)
return res;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -383,7 +383,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
memset(vmw_bo, 0, sizeof(*vmw_bo));
- BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+ BUILD_BUG_ON(DRM_MAX_LRU_PRIORITY <= 3);
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -82,7 +82,7 @@ struct vmw_bo {
struct ttm_bo_kmap_obj map;
struct rb_root res_tree;
- u32 res_prios[TTM_MAX_BO_PRIORITY];
+ u32 res_prios[DRM_MAX_LRU_PRIORITY];
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -72,7 +72,7 @@
#define XE_PL_TT TTM_PL_TT
#define XE_PL_VRAM0 TTM_PL_VRAM
#define XE_PL_VRAM1 (XE_PL_VRAM0 + 1)
-#define XE_PL_STOLEN (TTM_NUM_MEM_TYPES - 1)
+#define XE_PL_STOLEN (DRM_NUM_MEM_TYPES - 1)
#define XE_BO_PROPS_INVALID (-1)
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -235,7 +235,7 @@ struct ttm_device {
/**
* @man_drv: An array of resource_managers, one per resource type.
*/
- struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
+ struct ttm_resource_manager *man_drv[DRM_NUM_MEM_TYPES];
/**
* @vma_manager: Address space manager for finding BOs to mmap.
@@ -277,14 +277,14 @@ static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
{
BUILD_BUG_ON(__builtin_constant_p(mem_type)
- && mem_type >= TTM_NUM_MEM_TYPES);
+ && mem_type >= DRM_NUM_MEM_TYPES);
return bdev->man_drv[mem_type];
}
static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
struct ttm_resource_manager *manager)
{
- BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= DRM_NUM_MEM_TYPES);
bdev->man_drv[type] = manager;
}
diff --git a/include/drm/ttm/ttm_range_manager.h b/include/drm/ttm/ttm_range_manager.h
--- a/include/drm/ttm/ttm_range_manager.h
+++ b/include/drm/ttm/ttm_range_manager.h
@@ -43,14 +43,14 @@ static __always_inline int ttm_range_man_init(struct ttm_device *bdev,
unsigned int type, bool use_tt,
unsigned long p_size)
{
- BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= DRM_NUM_MEM_TYPES);
return ttm_range_man_init_nocheck(bdev, type, use_tt, p_size);
}
static __always_inline int ttm_range_man_fini(struct ttm_device *bdev,
unsigned int type)
{
- BUILD_BUG_ON(__builtin_constant_p(type) && type >= TTM_NUM_MEM_TYPES);
+ BUILD_BUG_ON(__builtin_constant_p(type) && type >= DRM_NUM_MEM_TYPES);
return ttm_range_man_fini_nocheck(bdev, type);
}
#endif
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -34,9 +34,7 @@
#include <drm/drm_print.h>
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_kmap_iter.h>
-
-#define TTM_MAX_BO_PRIORITY 4U
-#define TTM_NUM_MEM_TYPES 8
+#include <drm/drm_evictable_lru.h>
struct ttm_device;
struct ttm_resource_manager;
@@ -167,7 +165,7 @@ struct ttm_resource_manager {
/*
* Protected by the bdev->lru_lock.
*/
- struct list_head lru[TTM_MAX_BO_PRIORITY];
+ struct list_head lru[DRM_MAX_LRU_PRIORITY];
/**
* @usage: How much of the resources are used, protected by the
@@ -253,7 +251,7 @@ struct ttm_lru_bulk_move_pos {
* ttm_lru_bulk_move_init() and ttm_bo_set_bulk_move().
*/
struct ttm_lru_bulk_move {
- struct ttm_lru_bulk_move_pos pos[TTM_NUM_MEM_TYPES][TTM_MAX_BO_PRIORITY];
+ struct ttm_lru_bulk_move_pos pos[DRM_NUM_MEM_TYPES][DRM_MAX_LRU_PRIORITY];
};
/**
@@ -309,7 +307,7 @@ ttm_resource_manager_set_used(struct ttm_resource_manager *man, bool used)
{
int i;
- for (i = 0; i < TTM_MAX_BO_PRIORITY; i++)
+ for (i = 0; i < DRM_MAX_LRU_PRIORITY; i++)
WARN_ON(!list_empty(&man->lru[i]));
man->use_type = used;
}
TTM_MAX_BO_PRIORITY and TTM_NUM_MEM_TYPES are moved from TTM to DRM, so:

s/TTM_MAX_BO_PRIORITY/DRM_MAX_LRU_PRIORITY
s/TTM_NUM_MEM_TYPES/DRM_NUM_MEM_TYPES

Signed-off-by: Oak Zeng <oak.zeng@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c      |  2 +-
 drivers/gpu/drm/i915/intel_region_ttm.c      |  2 +-
 drivers/gpu/drm/i915/selftests/mock_region.c |  2 +-
 drivers/gpu/drm/ttm/ttm_device.c             |  8 ++++----
 drivers/gpu/drm/ttm/ttm_resource.c           | 12 ++++++------
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c           |  2 +-
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.h           |  2 +-
 drivers/gpu/drm/xe/xe_bo.h                   |  2 +-
 include/drm/ttm/ttm_device.h                 |  6 +++---
 include/drm/ttm/ttm_range_manager.h          |  4 ++--
 include/drm/ttm/ttm_resource.h               | 10 ++++------
 11 files changed, 25 insertions(+), 27 deletions(-)
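
For readers following along without the rest of the series: this patch relies
on the new <drm/drm_evictable_lru.h> header (introduced by an earlier patch in
the series, not shown here) to provide the renamed constants. A minimal sketch
of what that header is assumed to define follows; the guard name is
hypothetical, and the values are taken from the TTM defines removed from
include/drm/ttm/ttm_resource.h above.

/*
 * Hypothetical sketch only -- the real drm_evictable_lru.h comes from an
 * earlier patch in this series and likely carries more than these two
 * constants. Values mirror the removed TTM defines so the rename is a
 * pure substitution with no behavioral change.
 */
#ifndef _DRM_EVICTABLE_LRU_H_
#define _DRM_EVICTABLE_LRU_H_

/* Number of LRU priority levels; was TTM_MAX_BO_PRIORITY (4U). */
#define DRM_MAX_LRU_PRIORITY	4U
/* Number of memory types/placements; was TTM_NUM_MEM_TYPES (8). */
#define DRM_NUM_MEM_TYPES	8

#endif /* _DRM_EVICTABLE_LRU_H_ */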