Don't dma_unmap in svm_range_unmap_from_gpus. The dma_addr arrays are
protected by the migrate_mutex, which we cannot hold when unmapping in
MMU notifiers. Instead, dma_unmap and free the dma_addr arrays whenever
the pages_addr array is invalidated: when migrating to VRAM and when
re-validating RAM.

Freeing the dma_addr arrays in svm_migrate_vma_to_vram fixes a bug
where stale system memory pages were mapped instead of VRAM after a
migration.

When freeing the dma_addr arrays, ignore the access bitmasks. Those
masks may have changed since the dma_addr arrays were allocated and
mapped.

Change-Id: I01a6121c4c9908c1da4f303e87dcafd509fabc86
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c |  1 +
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 16 +++-------------
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h     |  1 +
 3 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -444,6 +444,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
prange->pages_addr = NULL;
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
+ svm_range_free_dma_mappings(prange);

out_free:
kvfree(buf);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -173,9 +173,8 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
}
}

-static void svm_range_free_dma_mappings(struct svm_range *prange)
+void svm_range_free_dma_mappings(struct svm_range *prange)
{
- DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE);
struct kfd_dev *kfd_dev;
dma_addr_t *dma_addr;
struct device *dev;
@@ -184,13 +183,8 @@ static void svm_range_free_dma_mappings(struct svm_range *prange)
int r;

p = container_of(prange->svms, struct kfd_process, svms);

- if (p->xnack_enabled)
- bitmap_copy(bitmap, prange->bitmap_aip, MAX_GPU_INSTANCE);
- else
- bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip,
- MAX_GPU_INSTANCE);
- for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) {
+ for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
dma_addr = prange->dma_addr[gpuidx];
if (!dma_addr)
continue;
@@ -292,6 +286,7 @@ svm_range_validate_ram(struct mm_struct *mm, struct svm_range *prange)
}
kvfree(prange->pages_addr);
+ svm_range_free_dma_mappings(prange);
prange->pages_addr = prange->hmm_range->hmm_pfns;
prange->hmm_range->hmm_pfns = NULL;
@@ -1192,11 +1187,6 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start,
if (r)
break;
}
-
- svm_range_dma_unmap(adev->dev, prange->dma_addr[gpuidx],
- start - prange->start,
- last - start + 1);
-
amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev,
p->pasid);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -175,5 +175,6 @@ void svm_range_add_list_work(struct svm_range_list *svms,
void schedule_deferred_list_work(struct svm_range_list *svms);
void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
unsigned long offset, unsigned long npages);
+void svm_range_free_dma_mappings(struct svm_range *prange);

#endif /* KFD_SVM_H_ */
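
For reference, a sketch of svm_range_free_dma_mappings as it reads
after this patch. Only the lines visible in the hunks above are
certain; the declarations of p and gpuidx, the
kfd_process_device_from_gpuidx lookup, and the use of prange->npages
are reconstructed from surrounding context and should be treated as
assumptions:

/* Sketch only: lines outside the hunks above are assumptions. */
void svm_range_free_dma_mappings(struct svm_range *prange)
{
	struct kfd_dev *kfd_dev;
	dma_addr_t *dma_addr;
	struct device *dev;
	struct kfd_process *p;
	uint32_t gpuidx;
	int r;

	p = container_of(prange->svms, struct kfd_process, svms);

	/*
	 * Walk all GPU instances rather than the access bitmasks: the
	 * masks may have changed since the dma_addr arrays were
	 * allocated and mapped.
	 */
	for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) {
		dma_addr = prange->dma_addr[gpuidx];
		if (!dma_addr)
			continue;

		/* Assumed lookup helper and fields below this point */
		r = kfd_process_device_from_gpuidx(p, gpuidx, &kfd_dev);
		if (r) {
			pr_debug("failed to find device idx %d\n", gpuidx);
			continue;
		}
		dev = &kfd_dev->pdev->dev;
		svm_range_dma_unmap(dev, dma_addr, 0, prange->npages);
		kvfree(dma_addr);
		prange->dma_addr[gpuidx] = NULL;
	}
}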