diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -407,7 +407,7 @@ static void dax_disassociate_entry(void *entry, struct address_space *mapping,
}
}

-static struct page *dax_busy_page(void *entry)
+static struct page *dax_pinned_page(void *entry)
{
unsigned long pfn;
@@ -665,7 +665,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
}

/**
- * dax_layout_busy_page_range - find first pinned page in @mapping
+ * dax_layout_pinned_page_range - find first pinned page in @mapping
* @mapping: address space to scan for a page with ref count > 1
* @start: Starting offset. Page containing 'start' is included.
* @end: End offset. Page containing 'end' is included. If 'end' is LLONG_MAX,
@@ -682,7 +682,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
* to be able to run unmap_mapping_range() and subsequently not race
* mapping_mapped() becoming true.
*/
-struct page *dax_layout_busy_page_range(struct address_space *mapping,
+struct page *dax_layout_pinned_page_range(struct address_space *mapping,
loff_t start, loff_t end)
{
void *entry;
@@ -727,7 +727,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
if (unlikely(dax_is_locked(entry)))
entry = get_unlocked_entry(&xas, 0);
if (entry)
- page = dax_busy_page(entry);
+ page = dax_pinned_page(entry);
put_unlocked_entry(&xas, entry, WAKE_NEXT);
if (page)
break;
@@ -742,13 +742,13 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
xas_unlock_irq(&xas);
return page;
}
-EXPORT_SYMBOL_GPL(dax_layout_busy_page_range);
+EXPORT_SYMBOL_GPL(dax_layout_pinned_page_range);

-struct page *dax_layout_busy_page(struct address_space *mapping)
+struct page *dax_layout_pinned_page(struct address_space *mapping)
{
- return dax_layout_busy_page_range(mapping, 0, LLONG_MAX);
+ return dax_layout_pinned_page_range(mapping, 0, LLONG_MAX);
}
-EXPORT_SYMBOL_GPL(dax_layout_busy_page);
+EXPORT_SYMBOL_GPL(dax_layout_pinned_page);

static int __dax_invalidate_entry(struct address_space *mapping,
pgoff_t index, bool trunc)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3957,7 +3957,7 @@ int ext4_break_layouts(struct inode *inode)
return -EINVAL;

do {
- page = dax_layout_busy_page(inode->i_mapping);
+ page = dax_layout_pinned_page(inode->i_mapping);
if (!page)
return 0;
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -443,7 +443,7 @@ static int fuse_setup_new_dax_mapping(struct inode *inode, loff_t pos,
/*
* Can't do inline reclaim in fault path. We call
- * dax_layout_busy_page() before we free a range. And
+ * dax_layout_pinned_page() before we free a range. And
* fuse_wait_dax_page() drops mapping->invalidate_lock and requires it.
* In fault path we enter with mapping->invalidate_lock held and can't
* drop it. Also in fault path we hold mapping->invalidate_lock shared
@@ -671,7 +671,7 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
{
struct page *page;

- page = dax_layout_busy_page_range(inode->i_mapping, start, end);
+ page = dax_layout_pinned_page_range(inode->i_mapping, start, end);
if (!page)
return 0;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -822,7 +822,7 @@ xfs_break_dax_layouts(
ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

- page = dax_layout_busy_page(inode->i_mapping);
+ page = dax_layout_pinned_page(inode->i_mapping);
if (!page)
return 0;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3481,7 +3481,7 @@ xfs_mmaplock_two_inodes_and_break_dax_layout(
* need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
* for this nested lock case.
*/
- page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
+ page = dax_layout_pinned_page(VFS_I(ip2)->i_mapping);
if (page && page_ref_count(page) != 1) {
xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
diff --git a/include/linux/dax.h b/include/linux/dax.h
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -157,8 +157,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev, void *holder)
int dax_writeback_mapping_range(struct address_space *mapping,
struct dax_device *dax_dev, struct writeback_control *wbc);

-struct page *dax_layout_busy_page(struct address_space *mapping);
-struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
+struct page *dax_layout_pinned_page(struct address_space *mapping);
+struct page *dax_layout_pinned_page_range(struct address_space *mapping, loff_t start, loff_t end);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
@@ -166,12 +166,14 @@ dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
void dax_unlock_mapping_entry(struct address_space *mapping,
unsigned long index, dax_entry_t cookie);
#else
-static inline struct page *dax_layout_busy_page(struct address_space *mapping)
+static inline struct page *dax_layout_pinned_page(struct address_space *mapping)
{
return NULL;
}

-static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages)
+static inline struct page *
+dax_layout_pinned_page_range(struct address_space *mapping, pgoff_t start,
+ pgoff_t nr_pages)
{
return NULL;
}
FSDAX needs to hold off truncate for pages undergoing DMA. Replace the
DAX-specific "busy" terminology with the "pinned" term. This is in
preparation for moving FSDAX from watching transitions of
page->_refcount to '1' to observations of page_maybe_dma_pinned()
returning false.

Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Cc: "Darrick J. Wong" <djwong@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 fs/dax.c            | 16 ++++++++--------
 fs/ext4/inode.c     |  2 +-
 fs/fuse/dax.c       |  4 ++--
 fs/xfs/xfs_file.c   |  2 +-
 fs/xfs/xfs_inode.c  |  2 +-
 include/linux/dax.h | 10 ++++++----
 6 files changed, 19 insertions(+), 17 deletions(-)
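For illustration, a minimal sketch of the semantic shift the changelog
describes. The two predicate names below are hypothetical, chosen only
to contrast the models; page_ref_count() and page_maybe_dma_pinned()
are the existing helpers (available via linux/mm.h) that the old and
new models key off, respectively:

#include <linux/mm.h>

/*
 * Hypothetical predicates, for illustration only. The "busy" model
 * holds off truncate whenever anyone besides the mapping itself holds
 * a page reference; the "pinned" model only waits for pages pinned
 * for DMA by FOLL_PIN users (e.g. pin_user_pages()).
 */
static inline bool dax_page_busy(struct page *page)
{
	/* old model: any elevated reference count blocks truncate */
	return page_ref_count(page) > 1;
}

static inline bool dax_page_pinned(struct page *page)
{
	/*
	 * new model: "maybe" because the heuristic can false-positive
	 * on very high refcounts, but it reliably catches DMA pins
	 */
	return page_maybe_dma_pinned(page);
}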
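The ext4, fuse, and xfs callers touched above all share the same retry
shape. A condensed sketch of that pattern, with a hypothetical
wait_for_page_idle() standing in for each filesystem's wait callback
(e.g. ext4_wait_dax_page()); it is not a real kernel symbol:

#include <linux/dax.h>
#include <linux/fs.h>

/* Hypothetical stand-in for the per-filesystem wait helper. */
static int wait_for_page_idle(struct inode *inode, struct page *page);

static int break_dax_layouts(struct inode *inode)
{
	struct page *page;
	int error;

	do {
		/* returns NULL once no page in the mapping is pinned */
		page = dax_layout_pinned_page(inode->i_mapping);
		if (!page)
			return 0;

		error = wait_for_page_idle(inode, page);
	} while (error == 0);

	return error;
}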