We no longer need to keep track of how many shadow entries are present
in a mapping.  This saves a few writes to the inode and memory barriers.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/filemap.c    | 12 ------------
 mm/truncate.c   |  1 -
 mm/workingset.c |  1 -
 3 files changed, 14 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
@@ -139,17 +139,6 @@ static void page_cache_delete(struct address_space *mapping,
 
 	page->mapping = NULL;
 	/* Leave page->index set: truncation lookup relies upon it */
-
-	if (shadow) {
-		mapping->nrexceptional += nr;
-		/*
-		 * Make sure the nrexceptional update is committed before
-		 * the nrpages update so that final truncate racing
-		 * with reclaim does not see both counters 0 at the
-		 * same time and miss a shadow entry.
-		 */
-		smp_wmb();
-	}
 	mapping->nrpages -= nr;
 }
 
@@ -860,7 +849,6 @@ static int __add_to_page_cache_locked(struct page *page,
 			goto unlock;
 
 		if (xa_is_value(old)) {
-			mapping->nrexceptional--;
 			if (shadowp)
 				*shadowp = old;
 		}
diff --git a/mm/truncate.c b/mm/truncate.c
@@ -40,7 +40,6 @@ static inline void __clear_shadow_entry(struct address_space *mapping,
 	if (xas_load(&xas) != entry)
 		return;
 	xas_store(&xas, NULL);
-	mapping->nrexceptional--;
 }
 
 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
diff --git a/mm/workingset.c b/mm/workingset.c
@@ -547,7 +547,6 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 		goto out_invalid;
 	if (WARN_ON_ONCE(node->count != node->nr_values))
 		goto out_invalid;
-	mapping->nrexceptional -= node->nr_values;
 	xas.xa_node = xa_parent_locked(&mapping->i_pages, node);
 	xas.xa_offset = node->offset;
 	xas.xa_shift = node->shift + XA_CHUNK_SHIFT;
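
A note on the barrier removed from page_cache_delete(): the deleted
comment describes an ordering between the nrexceptional and nrpages
updates, so that a final truncate racing with reclaim could not observe
both counters as zero while a shadow entry still existed.  That
ordering can be modelled in userspace C roughly as follows; every name
below is illustrative rather than kernel API, and this is only a sketch
of the old two-counter scheme, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong nrexceptional;	/* shadow entries */
static atomic_ulong nrpages;		/* pages */

/* Writer: replace the last page with a shadow entry. */
static void delete_page_leaving_shadow(void)
{
	atomic_fetch_add_explicit(&nrexceptional, 1, memory_order_relaxed);
	/* Plays the role of smp_wmb(): commit nrexceptional first. */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_sub_explicit(&nrpages, 1, memory_order_relaxed);
}

/* Reader: final truncate deciding whether any cleanup is needed. */
static bool mapping_looks_empty(void)
{
	unsigned long pages, shadows;

	pages = atomic_load_explicit(&nrpages, memory_order_relaxed);
	/* Plays the role of the matching smp_rmb() on the read side. */
	atomic_thread_fence(memory_order_acquire);
	shadows = atomic_load_explicit(&nrexceptional, memory_order_relaxed);

	/*
	 * A reader that observes the final nrpages decrement is
	 * guaranteed to also observe the nrexceptional increment,
	 * so it cannot miss the shadow entry.
	 */
	return pages == 0 && shadows == 0;
}

With nrexceptional gone there is only one counter left for
page_cache_delete() to update, so there is no pair of writes to order
and the smp_wmb() can be removed along with it.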
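
With the counter gone, "does this mapping still contain anything?" can
be answered from the page cache itself: pages and shadow entries are
stored in the same XArray (mapping->i_pages), so emptiness of that
structure covers both.  A minimal sketch of such a helper, using the
existing xa_empty() primitive (the helper name here is illustrative;
the actual helper used alongside this patch may differ):

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

Callers that previously had to check mapping->nrpages together with
mapping->nrexceptional can use a single check like this instead, which
is why no replacement counter update appears in any of the hunks above.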