@@ -419,11 +419,13 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
page = i * dir + add;
if (old_iomap == NULL) {
pgprot_t prot = ttm_io_prot(old_mem->placement,
+ ttm->page_flags,
PAGE_KERNEL);
ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
prot);
} else if (new_iomap == NULL) {
pgprot_t prot = ttm_io_prot(new_mem->placement,
+ ttm->page_flags,
PAGE_KERNEL);
ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
prot);
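
Both branches above map a ttm page through the kernel address space, so
they now forward ttm->page_flags and let ttm_io_prot() factor in the
encryption state of those pages. For orientation (not part of the
patch): on x86 with SME/SEV, the pgprot_decrypted() applied further
down reduces to clearing the encryption mask from the protection value,
roughly

	/* x86 semantics of pgprot_decrypted(), shown for reference;
	 * sme_me_mask holds the hardware encryption bit(s).
	 */
	#define pgprot_decrypted(prot) \
		__pgprot(pgprot_val(prot) & ~sme_me_mask)

so the resulting kernel PTE maps the page unencrypted.
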
@@ -525,11 +527,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return 0;
}
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+pgprot_t ttm_io_prot(u32 caching_flags, u32 tt_page_flags, pgprot_t tmp)
{
/* Cached mappings need no adjustment */
if (caching_flags & TTM_PL_FLAG_CACHED)
- return tmp;
+ goto check_encryption;
#if defined(__i386__) || defined(__x86_64__)
if (caching_flags & TTM_PL_FLAG_WC)
@@ -547,6 +549,11 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
#if defined(__sparc__)
tmp = pgprot_noncached(tmp);
#endif
+
+check_encryption:
+ if (tt_page_flags & TTM_PAGE_FLAG_DECRYPTED)
+ tmp = pgprot_decrypted(tmp);
+
return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
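
With the reworked helper, fully cached mappings no longer return early;
the goto keeps their fast path clear of the caching adjustments while
still applying the decryption override at the end. The caller-visible
contract is then roughly (illustrative sketch; "tt" and "mem" are
hypothetical locals, not from the patch):

	pgprot_t prot = ttm_io_prot(mem->placement, tt->page_flags,
				    PAGE_KERNEL);
	/* prot now reflects the caching mode and, when
	 * TTM_PAGE_FLAG_DECRYPTED is set, has the encryption bit
	 * cleared.
	 */
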
@@ -593,7 +600,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
if (ret)
return ret;
- if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED) &&
+ !(ttm->page_flags & TTM_PAGE_FLAG_DECRYPTED)) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
@@ -607,7 +615,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
- prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+ prot = ttm_io_prot(mem->placement, ttm->page_flags,
+ PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
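
The kmap() fast path reuses the page's default kernel mapping, which is
encrypted by default under SEV, so it may only be taken when the page
is not flagged decrypted; everything else goes through vmap() with an
explicit pgprot. Condensed decision logic (abridged from
ttm_bo_kmap_ttm(), not verbatim):

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED) &&
	    !(ttm->page_flags & TTM_PAGE_FLAG_DECRYPTED)) {
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		prot = ttm_io_prot(mem->placement, ttm->page_flags,
				   PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
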
@@ -229,12 +229,7 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
* by mmap_sem in write mode.
*/
cvma = *vma;
- cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);
-
- if (bo->mem.bus.is_iomem) {
- cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
- } else {
+ if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,
@@ -244,13 +239,15 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
ttm = bo->ttm;
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
- cvma.vm_page_prot);
-
- /* Allocate all page at once, most common usage */
- if (ttm_tt_populate(ttm, &ctx)) {
+ ttm->page_flags, cvma.vm_page_prot);
+ if (ttm_tt_populate(bo->ttm, &ctx)) {
ret = VM_FAULT_OOM;
goto out_io_unlock;
}
+ } else {
+ /* Iomem should not be marked encrypted */
+ cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
+ TTM_PAGE_FLAG_DECRYPTED, cvma.vm_page_prot);
}
/*
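
Net effect in the fault path: both branches now derive the user PTE
protection from ttm_io_prot(). Device memory always passes
TTM_PAGE_FLAG_DECRYPTED, since io memory is never encrypted, while
system pages follow whatever was recorded at populate time. A
hypothetical helper expressing the same choice (illustrative only, not
in the patch):

	static pgprot_t ttm_fault_prot(struct ttm_buffer_object *bo,
				       pgprot_t base)
	{
		u32 flags = bo->mem.bus.is_iomem ?
			TTM_PAGE_FLAG_DECRYPTED : bo->ttm->page_flags;

		return ttm_io_prot(bo->mem.placement, flags, base);
	}
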
@@ -984,6 +984,9 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
}
ttm->state = tt_unbound;
+ if (sev_active())
+ ttm->page_flags |= TTM_PAGE_FLAG_DECRYPTED;
+
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
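
sev_active(), declared in <linux/mem_encrypt.h>, is true only in SEV
guests, where the DMA layer remaps coherent allocations unencrypted;
pages from the TTM DMA pools are therefore already decrypted in the
kernel map, and the flag records that for later CPU mappings. Under
bare-metal SME, coherent DMA memory can stay encrypted, so no flag is
needed there. Condensed restatement of the rule this hunk adds
(illustrative, not in the patch):

	static u32 ttm_dma_tt_page_flags(u32 page_flags)
	{
		if (sev_active())	/* SEV guest: DMA pages are shared */
			page_flags |= TTM_PAGE_FLAG_DECRYPTED;
		return page_flags;
	}
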
@@ -483,8 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_pages = src->ttm->pages;
d.dst_num_pages = dst->num_pages;
d.src_num_pages = src->num_pages;
- d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
- d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+ d.dst_prot = ttm_io_prot(dst->mem.placement, dst->ttm->page_flags,
+ PAGE_KERNEL);
+ d.src_prot = ttm_io_prot(src->mem.placement, src->ttm->page_flags,
+ PAGE_KERNEL);
d.diff = diff;
for (j = 0; j < h; ++j) {
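
vmw_bo_cpu_blit() copies through temporary kernel mappings of both
buffer objects, so each side needs the pgprot matching its own caching
and encryption state. The line copier maps pages with these
protections, roughly (concept sketch; src_va, src_page, bytes and the
offsets are illustrative names, and the page/offset bookkeeping is
omitted):

	src_va = kmap_atomic_prot(src_page, d.src_prot);
	dst_va = kmap_atomic_prot(dst_page, d.dst_prot);
	memcpy(dst_va + dst_off, src_va + src_off, bytes);
	kunmap_atomic(dst_va);
	kunmap_atomic(src_va);
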
@@ -893,13 +893,15 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
/**
* ttm_io_prot
*
- * @c_state: Caching state.
+ * @caching_flags: The caching flags of the map.
+ * @tt_page_flags: The ttm_tt page flags of the map (TTM_PAGE_FLAG_*).
* @tmp: Page protection flag for a normal, cached mapping.
*
* Utility function that returns the pgprot_t that should be used for
- * setting up a PTE with the caching model indicated by @c_state.
+ * setting up a PTE with the caching model indicated by @caching_flags,
+ * and the encryption state indicated by @tt_page_flags.
*/
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+pgprot_t ttm_io_prot(u32 caching_flags, u32 tt_page_flags, pgprot_t tmp);
extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;
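
The prototype change is deliberately signature-breaking, so every
caller has to decide which tt page flags to pass. Where no struct
ttm_tt exists (pure io memory mappings), a literal flag is passed
instead, as in the fault path above:

	cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
					TTM_PAGE_FLAG_DECRYPTED,
					cvma.vm_page_prot);
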
@@ -41,6 +41,7 @@ struct ttm_operation_ctx;
#define TTM_PAGE_FLAG_DMA32 (1 << 7)
#define TTM_PAGE_FLAG_SG (1 << 8)
#define TTM_PAGE_FLAG_NO_RETRY (1 << 9)
+#define TTM_PAGE_FLAG_DECRYPTED (1 << 10)
enum ttm_caching_state {
tt_uncached,
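
TTM_PAGE_FLAG_DECRYPTED takes the next free page flag bit. Any code
that builds a CPU mapping of tt pages can test it directly; the minimal
pattern mirrors the check_encryption block above:

	if (ttm->page_flags & TTM_PAGE_FLAG_DECRYPTED)
		prot = pgprot_decrypted(prot);
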