@@ -368,7 +368,7 @@ static inline void dax_mapping_set_cow(struct page *page)
  * FS_DAX_MAPPING_COW, and use page->index as refcount.
  */
 static void dax_associate_entry(void *entry, struct address_space *mapping,
-		struct vm_area_struct *vma, unsigned long address, bool cow)
+		struct vm_fault *vmf, unsigned long flags)
 {
 	unsigned long size = dax_entry_size(entry), pfn, index;
 	int i = 0;
@@ -376,11 +376,11 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 		return;
 
-	index = linear_page_index(vma, address & ~(size - 1));
+	index = linear_page_index(vmf->vma, ALIGN_DOWN(vmf->address, size));
 	for_each_mapped_pfn(entry, pfn) {
 		struct page *page = pfn_to_page(pfn);
 
-		if (cow) {
+		if (flags & DAX_COW) {
 			dax_mapping_set_cow(page);
 		} else {
 			WARN_ON_ONCE(page->mapping);
@@ -916,8 +916,7 @@ static vm_fault_t dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 		void *old;
 
 		dax_disassociate_entry(entry, mapping, false);
-		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
-				cow);
+		dax_associate_entry(new_entry, mapping, vmf, flags);
 		/*
 		 * Only swap our new entry into the page cache if the current
 		 * entry is a zero page or an empty entry. If a normal PTE or
Pass @vmf to drop the separate @vma and @address arguments to
dax_associate_entry(), use the existing DAX flags to convey the @cow
argument, and replace the open-coded ALIGN_DOWN(). Note that the
original expression rounds the fault address down to the entry size, so
ALIGN_DOWN(), not ALIGN(), is the drop-in replacement.

Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 fs/dax.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
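
For reference, here is a minimal user-space sketch of why the round-down
must be spelled ALIGN_DOWN(): ALIGN() rounds up, so for an unaligned
fault address it would compute the index of the *next* entry. The macro
bodies below mirror include/linux/align.h, and the fault address and
entry sizes are made-up example values, not taken from the patch:

/*
 * Build (assumed): cc -Wall -o align_demo align_demo.c && ./align_demo
 * User-space sketch only, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

/* Round @x up to the next multiple of power-of-two @a. */
#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))
/* Round @x down to the previous multiple of power-of-two @a. */
#define ALIGN_DOWN(x, a)	ALIGN((x) - ((a) - 1), (a))

int main(void)
{
	/* PTE- and PMD-sized DAX entries on x86-64: 4KiB and 2MiB. */
	unsigned long sizes[] = { 1UL << 12, 1UL << 21 };
	/* An arbitrary, unaligned fault address. */
	unsigned long address = 0x7f1234567abcUL;

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long size = sizes[i];

		/* The open-coded expression being replaced rounds down... */
		assert(ALIGN_DOWN(address, size) == (address & ~(size - 1)));
		/* ...whereas ALIGN() rounds up for any unaligned address. */
		assert(ALIGN(address, size) != (address & ~(size - 1)));

		printf("size %#9lx: down %#lx, up %#lx\n", size,
		       ALIGN_DOWN(address, size), ALIGN(address, size));
	}
	return 0;
}

Since dax_entry_size() only ever returns power-of-two sizes,
ALIGN_DOWN() is an exact drop-in for the masked expression, which is why
the hunk above uses it rather than ALIGN().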