Message ID | 20191126202717.30762-1-thomas_os@shipmail.org (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [1/2] mm: Add and export vmf_insert_mixed_prot() | expand |
Am 26.11.19 um 21:27 schrieb Thomas Hellström (VMware): > From: Thomas Hellstrom <thellstrom@vmware.com> > > The TTM module today uses a hack to be able to set a different page > protection than struct vm_area_struct::vm_page_prot. To be able to do > this properly, add and export vmf_insert_mixed_prot(). > > Cc: Andrew Morton <akpm@linux-foundation.org> > Cc: Michal Hocko <mhocko@suse.com> > Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org> > Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> > Cc: Ralph Campbell <rcampbell@nvidia.com> > Cc: "Jérôme Glisse" <jglisse@redhat.com> > Cc: "Christian König" <christian.koenig@amd.com> > Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Acked-by: Christian König <christian.koenig@amd.com> > --- > include/linux/mm.h | 2 ++ > mm/memory.c | 15 +++++++++++---- > 2 files changed, 13 insertions(+), 4 deletions(-) > > diff --git a/include/linux/mm.h b/include/linux/mm.h > index cc292273e6ba..29575d3c1e47 100644 > --- a/include/linux/mm.h > +++ b/include/linux/mm.h > @@ -2548,6 +2548,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, > unsigned long pfn, pgprot_t pgprot); > vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, > pfn_t pfn); > +vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, > + pfn_t pfn, pgprot_t pgprot); > vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, > unsigned long addr, pfn_t pfn); > int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); > diff --git a/mm/memory.c b/mm/memory.c > index b1ca51a079f2..28f162e28144 100644 > --- a/mm/memory.c > +++ b/mm/memory.c > @@ -1719,9 +1719,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) > } > > static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, > - unsigned long addr, pfn_t pfn, bool mkwrite) > + unsigned long addr, pfn_t pfn, pgprot_t pgprot, > + bool mkwrite) > { > - pgprot_t pgprot = vma->vm_page_prot; > int err; > > BUG_ON(!vm_mixed_ok(vma, pfn)); > @@ -1764,10 +1764,17 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, > return VM_FAULT_NOPAGE; > } > > +vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, > + pfn_t pfn, pgprot_t pgprot) > +{ > + return __vm_insert_mixed(vma, addr, pfn, pgprot, false); > +} > +EXPORT_SYMBOL(vmf_insert_mixed_prot); > + > vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, > pfn_t pfn) > { > - return __vm_insert_mixed(vma, addr, pfn, false); > + return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false); > } > EXPORT_SYMBOL(vmf_insert_mixed); > > @@ -1779,7 +1786,7 @@ EXPORT_SYMBOL(vmf_insert_mixed); > vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, > unsigned long addr, pfn_t pfn) > { > - return __vm_insert_mixed(vma, addr, pfn, true); > + return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true); > } > EXPORT_SYMBOL(vmf_insert_mixed_mkwrite); >
diff --git a/include/linux/mm.h b/include/linux/mm.h index cc292273e6ba..29575d3c1e47 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2548,6 +2548,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot); vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn); +vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, + pfn_t pfn, pgprot_t pgprot); vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn); int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); diff --git a/mm/memory.c b/mm/memory.c index b1ca51a079f2..28f162e28144 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1719,9 +1719,9 @@ static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn) } static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, - unsigned long addr, pfn_t pfn, bool mkwrite) + unsigned long addr, pfn_t pfn, pgprot_t pgprot, + bool mkwrite) { - pgprot_t pgprot = vma->vm_page_prot; int err; BUG_ON(!vm_mixed_ok(vma, pfn)); @@ -1764,10 +1764,17 @@ static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma, return VM_FAULT_NOPAGE; } +vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr, + pfn_t pfn, pgprot_t pgprot) +{ + return __vm_insert_mixed(vma, addr, pfn, pgprot, false); +} +EXPORT_SYMBOL(vmf_insert_mixed_prot); + vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) { - return __vm_insert_mixed(vma, addr, pfn, false); + return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false); } EXPORT_SYMBOL(vmf_insert_mixed); @@ -1779,7 +1786,7 @@ EXPORT_SYMBOL(vmf_insert_mixed); vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr, pfn_t pfn) { - return __vm_insert_mixed(vma, addr, pfn, true); + return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true); } EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);