@@ -999,7 +999,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
- bool is_cached = false;
+ bool immediate_free = false;
unsigned count, i, npages = 0;
unsigned long irq_flags;
@@ -1034,8 +1034,17 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
if (!pool)
return;
- is_cached = (ttm_dma_find_pool(pool->dev,
- ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+ /*
+ * If memory is cached and sev encryption is not active, allocating
+ * and freeing coherent memory is relatively cheap, so we can free
+ * it immediately. If sev encryption is active, allocating coherent
+ * memory involves a call to set_memory_decrypted() which is very
+ * expensive, so we cache coherent pages if SEV is active.
+ */
+ immediate_free = (ttm_dma_find_pool
+ (pool->dev,
+ ttm_to_type(ttm->page_flags, tt_cached)) == pool &&
+ !sev_active());
/* make sure pages array match list and count number of pages */
count = 0;
@@ -1050,13 +1059,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
d_page->vaddr &= ~VADDR_FLAG_UPDATED_COUNT;
}
- if (is_cached)
+ if (immediate_free)
ttm_dma_page_put(pool, d_page);
}
spin_lock_irqsave(&pool->lock, irq_flags);
pool->npages_in_use -= count;
- if (is_cached) {
+ if (immediate_free) {
pool->nfrees += count;
} else {
pool->npages_free += count;