@@ -294,6 +294,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 {
 	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
 	struct interval_tree_node *it;
+	bool update_to_read_only;
 	unsigned long end;
 
 	/* notification is exclusive, but interval is inclusive */
@@ -302,6 +303,8 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 	if (amdgpu_mn_read_lock(amn, range->blockable))
 		return -EAGAIN;
 
+	update_to_read_only = mmu_notifier_range_update_to_read_only(range);
+
 	it = interval_tree_iter_first(&amn->objects, range->start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
@@ -317,6 +320,16 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
 
 		list_for_each_entry(bo, &node->bos, mn_list) {
 			struct kgd_mem *mem = bo->kfd_bo;
+			bool read_only;
+
+			/*
+			 * If the pages are already mapped read-only and this
+			 * invalidation only updates the range to read-only,
+			 * nothing changes for this BO, so skip it.
+			 */
+			read_only = amdgpu_ttm_tt_is_readonly(bo->tbo.ttm);
+			if (update_to_read_only && read_only)
+				continue;
 
 			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
 							 range->start,
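
For context: mmu_notifier_range_update_to_read_only() returns true when the
notification stems from a VMA protection change that leaves the range
readable, i.e. only write permission is being removed; in that case a userptr
BO whose pages are already mapped read-only cannot be affected, so the
eviction can be skipped. Below is a minimal userspace sketch of that skip
decision, not kernel code; "fake_range" and "fake_bo" are hypothetical
stand-ins for struct mmu_notifier_range and the BO's ttm_tt state.

/*
 * Sketch of the skip test added above: invalidation is only needed
 * unless both the range update and the BO mapping are read-only.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_range {
	bool update_to_read_only;	/* mmu_notifier_range_update_to_read_only() */
};

struct fake_bo {
	bool read_only;			/* amdgpu_ttm_tt_is_readonly() */
};

/* Mirrors: if (update_to_read_only && read_only) continue; */
static bool needs_invalidation(const struct fake_range *range,
			       const struct fake_bo *bo)
{
	return !(range->update_to_read_only && bo->read_only);
}

int main(void)
{
	struct fake_range downgrade = { .update_to_read_only = true };
	struct fake_range generic   = { .update_to_read_only = false };
	struct fake_bo ro = { .read_only = true };
	struct fake_bo rw = { .read_only = false };

	printf("%d\n", needs_invalidation(&downgrade, &ro));	/* 0: skipped */
	printf("%d\n", needs_invalidation(&downgrade, &rw));	/* 1: evict */
	printf("%d\n", needs_invalidation(&generic, &ro));	/* 1: evict */
	return 0;
}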