Clarify the meaning of the ttm_tt pages_limit watermarks as the max
number of pages not accessible by shrinkers, and update accordingly so
that memory allocated by TTM devices that support shrinking is not
accounted against those limits. In particular this means that devices
using the dma_alloc pool will still be using the watermark method.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/ttm/ttm_device.c |  3 ++-
 drivers/gpu/drm/ttm/ttm_tt.c     | 43 +++++++++++++++++++-------------
 include/drm/ttm/ttm_pool.h       | 15 +++++++++++
 3 files changed, 42 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -168,7 +168,8 @@ long ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
unsigned i;
long ret;

- if (reason != TTM_SHRINK_WATERMARK && !bdev->funcs->bo_shrink)
+ if (reason != TTM_SHRINK_WATERMARK &&
+ (!bdev->funcs->bo_shrink || !ttm_pool_can_shrink(&bdev->pool)))
return 0;

spin_lock(&bdev->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -54,6 +54,21 @@ module_param_named(dma32_pages_limit, ttm_dma32_pages_limit, ulong, 0644);
static atomic_long_t ttm_pages_allocated;
static atomic_long_t ttm_dma32_pages_allocated;

+static bool ttm_tt_shrinkable(const struct ttm_device *bdev,
+ const struct ttm_tt *tt)
+{
+ return !!bdev->funcs->bo_shrink &&
+ ttm_pool_can_shrink(&bdev->pool) &&
+ !(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
+}
+
+static void ttm_tt_mod_allocated(bool dma32, long value)
+{
+ atomic_long_add(value, &ttm_pages_allocated);
+ if (dma32)
+ atomic_long_add(value, &ttm_dma32_pages_allocated);
+}
+
/*
* Allocates a ttm structure for the given BO.
*/
@@ -304,12 +319,9 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ttm_tt_is_populated(ttm))
return 0;

- if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
- atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
- atomic_long_add(ttm->num_pages,
- &ttm_dma32_pages_allocated);
- }
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !ttm_tt_shrinkable(bdev, ttm))
+ ttm_tt_mod_allocated(bdev->pool.use_dma32, ttm->num_pages);

while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
atomic_long_read(&ttm_dma32_pages_allocated) >
@@ -343,12 +355,10 @@ int ttm_tt_populate(struct ttm_device *bdev,
return 0;

error:
- if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
- atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
- atomic_long_sub(ttm->num_pages,
- &ttm_dma32_pages_allocated);
- }
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !ttm_tt_shrinkable(bdev, ttm))
+ ttm_tt_mod_allocated(bdev->pool.use_dma32, -(long)ttm->num_pages);
+
return ret;
}
EXPORT_SYMBOL(ttm_tt_populate);
@@ -363,12 +373,9 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
else
ttm_pool_free(&bdev->pool, ttm);

- if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
- atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
- if (bdev->pool.use_dma32)
- atomic_long_sub(ttm->num_pages,
- &ttm_dma32_pages_allocated);
- }
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL) &&
+ !ttm_tt_shrinkable(bdev, ttm))
+ ttm_tt_mod_allocated(bdev->pool.use_dma32, -(long)ttm->num_pages);

ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
diff --git a/include/drm/ttm/ttm_pool.h b/include/drm/ttm/ttm_pool.h
--- a/include/drm/ttm/ttm_pool.h
+++ b/include/drm/ttm/ttm_pool.h
@@ -89,4 +89,19 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
int ttm_pool_mgr_init(unsigned long num_pages);
void ttm_pool_mgr_fini(void);

+/**
+ * ttm_pool_can_shrink - Whether page allocations from this pool are shrinkable
+ * @pool: The pool.
+ *
+ * Return: true if shrinkable, false if not.
+ */
+static inline bool ttm_pool_can_shrink(const struct ttm_pool *pool)
+{
+ /*
+ * The dma_alloc pool pages can't be inserted into the
+ * swap cache. Nor can they be split.
+ */
+ return !pool->use_dma_alloc;
+}
+
#endif
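
---

A note for reviewers, not part of the commit message: the accounting rule
above boils down to "charge the watermarks only when neither the driver nor
the pool can hand the pages to a shrinker". Below is a minimal user-space
model of that predicate. The model_* types, the flag value and the bo_shrink
stand-in are illustrative assumptions only; just the logic of
ttm_tt_shrinkable() and ttm_pool_can_shrink() is mirrored from the patch.

#include <stdbool.h>
#include <stdio.h>

#define TTM_TT_FLAG_EXTERNAL (1U << 2)	/* flag value assumed for the model */

struct model_pool {
	bool use_dma_alloc;	/* dma_alloc pages can't enter the swap cache */
};

struct model_funcs {
	int (*bo_shrink)(void);	/* stand-in for the driver's bo_shrink hook */
};

struct model_device {
	const struct model_funcs *funcs;
	struct model_pool pool;
};

/* Mirrors ttm_pool_can_shrink(). */
static bool model_pool_can_shrink(const struct model_pool *pool)
{
	return !pool->use_dma_alloc;
}

/* Mirrors ttm_tt_shrinkable(): driver, pool and tt must all allow shrinking. */
static bool model_tt_shrinkable(const struct model_device *bdev,
				unsigned int page_flags)
{
	return bdev->funcs->bo_shrink &&
		model_pool_can_shrink(&bdev->pool) &&
		!(page_flags & TTM_TT_FLAG_EXTERNAL);
}

static int dummy_shrink(void)
{
	return 0;
}

int main(void)
{
	const struct model_funcs shrinking = { .bo_shrink = dummy_shrink };
	const struct model_funcs plain = { .bo_shrink = NULL };
	const struct model_device shrinker_dev = { &shrinking, { false } };
	const struct model_device dma_alloc_dev = { &shrinking, { true } };
	const struct model_device legacy_dev = { &plain, { false } };

	/* Only the first device escapes the watermark accounting. */
	printf("shrinker_dev charged:  %d\n", !model_tt_shrinkable(&shrinker_dev, 0));
	printf("dma_alloc_dev charged: %d\n", !model_tt_shrinkable(&dma_alloc_dev, 0));
	printf("legacy_dev charged:    %d\n", !model_tt_shrinkable(&legacy_dev, 0));
	return 0;
}

Note also why ttm_tt_populate() and ttm_tt_unpopulate() keep the explicit
TTM_TT_FLAG_EXTERNAL check in front of !ttm_tt_shrinkable(): external ttms
were never accounted before this patch, and since ttm_tt_shrinkable() returns
false for them, dropping that check would start charging them against the
watermarks.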