Message ID | 20191126202717.30762-2-thomas_os@shipmail.org (mailing list archive)
---|---
State | New, archived
Series | [1/2] mm: Add and export vmf_insert_mixed_prot()
On 26.11.19 21:27, Thomas Hellström (VMware) wrote:
> From: Thomas Hellstrom <thellstrom@vmware.com>
>
> We were using an ugly hack to set the page protection correctly.
> Fix that and instead use vmf_insert_mixed_prot() and / or
> vmf_insert_pfn_prot().
> Also get the default page protection from
> struct vm_area_struct::vm_page_prot rather than using vm_get_page_prot().
> This way we catch modifications done by the vm system for drivers that
> want write-notification.
>
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Cc: Michal Hocko <mhocko@suse.com>
> Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
> Cc: Ralph Campbell <rcampbell@nvidia.com>
> Cc: "Jérôme Glisse" <jglisse@redhat.com>
> Cc: "Christian König" <christian.koenig@amd.com>
> Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>

Reviewed-by: Christian König <christian.koenig@amd.com>

> ---
>  drivers/gpu/drm/ttm/ttm_bo_vm.c | 14 +++++++-------
>  1 file changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> index e6495ca2630b..2098f8d4dfc5 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
> @@ -173,7 +173,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>                              pgoff_t num_prefault)
>  {
>          struct vm_area_struct *vma = vmf->vma;
> -        struct vm_area_struct cvma = *vma;
>          struct ttm_buffer_object *bo = vma->vm_private_data;
>          struct ttm_bo_device *bdev = bo->bdev;
>          unsigned long page_offset;
> @@ -244,7 +243,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>                  goto out_io_unlock;
>          }
>
> -        cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
> +        prot = ttm_io_prot(bo->mem.placement, prot);
>          if (!bo->mem.bus.is_iomem) {
>                  struct ttm_operation_ctx ctx = {
>                          .interruptible = false,
> @@ -260,7 +259,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>                  }
>          } else {
>                  /* Iomem should not be marked encrypted */
> -                cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
> +                prot = pgprot_decrypted(prot);
>          }
>
>          /*
> @@ -284,10 +283,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
>                  }
>
>                  if (vma->vm_flags & VM_MIXEDMAP)
> -                        ret = vmf_insert_mixed(&cvma, address,
> -                                               __pfn_to_pfn_t(pfn, PFN_DEV));
> +                        ret = vmf_insert_mixed_prot(vma, address,
> +                                                    __pfn_to_pfn_t(pfn, PFN_DEV),
> +                                                    prot);
>                  else
> -                        ret = vmf_insert_pfn(&cvma, address, pfn);
> +                        ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
>
>                  /* Never error on prefaulted PTEs */
>                  if (unlikely((ret & VM_FAULT_ERROR))) {
> @@ -319,7 +319,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
>          if (ret)
>                  return ret;
>
> -        prot = vm_get_page_prot(vma->vm_flags);
> +        prot = vma->vm_page_prot;
>          ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
>          if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
>                  return ret;
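As a concrete illustration of the calling convention the patch moves to, here is a minimal sketch under stated assumptions (hypothetical driver code, not TTM and not part of this patch; the full diff follows below): the handler takes the base protection from vma->vm_page_prot, adjusts a local copy, and hands it to the *_prot insertion helpers added in patch 1/2, so no throwaway copy of the VMA is needed. The base-PFN-in-vm_pgoff assumption and the write-combine attribute are for illustration only.

#include <linux/mm.h>
#include <linux/pfn_t.h>

/*
 * Hypothetical fault handler (not TTM code) showing the pattern the
 * patch switches to: take the base protection from vma->vm_page_prot,
 * modify a local copy, and pass it to the *_prot insertion helpers
 * from patch 1/2 instead of faking a VMA on the stack.
 * Assumption for illustration only: vm_pgoff was set to the first
 * device PFN at mmap time, and the memory wants write-combining.
 */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        /* Base protection, including any modifications made by the vm layer */
        pgprot_t prot = vma->vm_page_prot;
        unsigned long pfn = vma->vm_pgoff +
                ((vmf->address - vma->vm_start) >> PAGE_SHIFT);

        /* Driver-specific caching attribute, applied to the local copy only */
        prot = pgprot_writecombine(prot);

        if (vma->vm_flags & VM_MIXEDMAP)
                return vmf_insert_mixed_prot(vma, vmf->address,
                                             __pfn_to_pfn_t(pfn, PFN_DEV),
                                             prot);

        return vmf_insert_pfn_prot(vma, vmf->address, pfn, prot);
}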
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index e6495ca2630b..2098f8d4dfc5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -173,7 +173,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                             pgoff_t num_prefault)
 {
         struct vm_area_struct *vma = vmf->vma;
-        struct vm_area_struct cvma = *vma;
         struct ttm_buffer_object *bo = vma->vm_private_data;
         struct ttm_bo_device *bdev = bo->bdev;
         unsigned long page_offset;
@@ -244,7 +243,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                 goto out_io_unlock;
         }

-        cvma.vm_page_prot = ttm_io_prot(bo->mem.placement, prot);
+        prot = ttm_io_prot(bo->mem.placement, prot);
         if (!bo->mem.bus.is_iomem) {
                 struct ttm_operation_ctx ctx = {
                         .interruptible = false,
@@ -260,7 +259,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                 }
         } else {
                 /* Iomem should not be marked encrypted */
-                cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
+                prot = pgprot_decrypted(prot);
         }

         /*
@@ -284,10 +283,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
                 }

                 if (vma->vm_flags & VM_MIXEDMAP)
-                        ret = vmf_insert_mixed(&cvma, address,
-                                               __pfn_to_pfn_t(pfn, PFN_DEV));
+                        ret = vmf_insert_mixed_prot(vma, address,
+                                                    __pfn_to_pfn_t(pfn, PFN_DEV),
+                                                    prot);
                 else
-                        ret = vmf_insert_pfn(&cvma, address, pfn);
+                        ret = vmf_insert_pfn_prot(vma, address, pfn, prot);

                 /* Never error on prefaulted PTEs */
                 if (unlikely((ret & VM_FAULT_ERROR))) {
@@ -319,7 +319,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
         if (ret)
                 return ret;

-        prot = vm_get_page_prot(vma->vm_flags);
+        prot = vma->vm_page_prot;
         ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT);
         if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                 return ret;
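The second point in the commit message, reading the default protection from vma->vm_page_prot rather than vm_get_page_prot(vma->vm_flags), is what makes write-notification work. Below is a hedged sketch of how a driver relying on that could be wired up; struct example_object, the dirty flag, and example_fault (the sketch above) are invented for illustration and are not part of the patch.

#include <linux/atomic.h>
#include <linux/mm.h>

/* Hypothetical per-mapping object, made up for this illustration. */
struct example_object {
        atomic_t dirty;
};

/*
 * When a shared writable mapping has a page_mkwrite()/pfn_mkwrite()
 * callback, the core mm computes a write-protected vma->vm_page_prot
 * (see vma_set_page_prot() and vma_wants_writenotify()).  A fault
 * handler that starts from vma->vm_page_prot therefore inserts
 * read-only PTEs, and the first write lands here.  Re-deriving the
 * protection with vm_get_page_prot(vma->vm_flags) would drop that
 * write-protection, and the notification would never fire.
 */
static vm_fault_t example_pfn_mkwrite(struct vm_fault *vmf)
{
        struct example_object *obj = vmf->vma->vm_private_data;

        atomic_set(&obj->dirty, 1);     /* record the write for dirty tracking */
        return 0;                       /* let the write proceed */
}

static const struct vm_operations_struct example_vm_ops = {
        .fault          = example_fault,        /* from the sketch above */
        .pfn_mkwrite    = example_pfn_mkwrite,
};

With vm_ops like these on a shared writable mapping, the fault path inserts read-only PTEs from vma->vm_page_prot and the first write is routed through the mkwrite callback, which is exactly the behaviour the patch preserves for TTM.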