--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm)
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
-void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk_ksm(struct folio *folio, const struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
#else /* !CONFIG_KSM */
@@ -79,7 +79,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
}
static inline void rmap_walk_ksm(struct folio *folio,
- struct rmap_walk_control *rwc)
+ const struct rmap_walk_control *rwc)
{
}
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -294,8 +294,8 @@ struct rmap_walk_control {
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
-void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
-void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
+void rmap_walk(struct folio *folio, const struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2601,7 +2601,7 @@ struct page *ksm_might_need_to_copy(struct page *page,
return new_page;
}
-void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
+void rmap_walk_ksm(struct folio *folio, const struct rmap_walk_control *rwc)
{
struct stable_node *stable_node;
struct rmap_item *rmap_item;
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -107,7 +107,7 @@ static void page_idle_clear_pte_refs(struct page *page)
if (need_lock && !folio_trylock(folio))
return;
- rmap_walk(folio, (struct rmap_walk_control *)&rwc);
+ rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2273,7 +2273,7 @@ void __put_anon_vma(struct anon_vma *anon_vma)
}
static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
- struct rmap_walk_control *rwc)
+ const struct rmap_walk_control *rwc)
{
struct anon_vma *anon_vma;
@@ -2308,8 +2308,8 @@ static struct anon_vma *rmap_walk_anon_lock(struct folio *folio,
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED.
*/
-static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc,
- bool locked)
+static void rmap_walk_anon(struct folio *folio,
+ const struct rmap_walk_control *rwc, bool locked)
{
struct anon_vma *anon_vma;
pgoff_t pgoff_start, pgoff_end;
@@ -2361,8 +2361,8 @@ static void rmap_walk_anon(struct folio *folio, struct rmap_walk_control *rwc,
* vm_flags for that VMA. That should be OK, because that vma shouldn't be
* LOCKED.
*/
-static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc,
- bool locked)
+static void rmap_walk_file(struct folio *folio,
+ const struct rmap_walk_control *rwc, bool locked)
{
struct address_space *mapping = folio_mapping(folio);
pgoff_t pgoff_start, pgoff_end;
@@ -2404,7 +2404,7 @@ static void rmap_walk_file(struct folio *folio, struct rmap_walk_control *rwc,
i_mmap_unlock_read(mapping);
}
-void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
+void rmap_walk(struct folio *folio, const struct rmap_walk_control *rwc)
{
if (unlikely(folio_test_ksm(folio)))
rmap_walk_ksm(folio, rwc);
@@ -2415,7 +2415,7 @@ void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc)
}
/* Like rmap_walk, but caller holds relevant rmap lock */
-void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
+void rmap_walk_locked(struct folio *folio, const struct rmap_walk_control *rwc)
{
/* no ksm support for now */
VM_BUG_ON_FOLIO(folio_test_ksm(folio), folio);
The rmap walking functions do not modify the rmap_walk_control, and
page_idle_clear_pte_refs() takes advantage of that to move construction
of the rmap_walk_control to compile time.  This lets us remove an
unclean cast.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/ksm.h  |  4 ++--
 include/linux/rmap.h |  4 ++--
 mm/ksm.c             |  2 +-
 mm/page_idle.c       |  2 +-
 mm/rmap.c            | 14 +++++++-------
 5 files changed, 13 insertions(+), 13 deletions(-)
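
For context, the call site this patch cleans up ends up looking roughly
like the sketch below.  The trylock/rmap_walk()/unlock sequence comes
from the mm/page_idle.c hunk above; the rwc initializers and the
early-exit checks are assumptions based on the usual page_idle
callbacks, not verbatim from the file.  What the sketch illustrates is
the pattern the constified API enables: the rmap_walk_control can be a
static const object built at compile time and passed directly, with no
cast needed to strip the const qualifier.

/*
 * Sketch of page_idle_clear_pte_refs() after this patch.  Not verbatim:
 * the .rmap_one/.anon_lock initializers and the mapping checks are
 * assumed; the trylock/walk/unlock sequence matches the hunk above.
 */
static void page_idle_clear_pte_refs(struct page *page)
{
	struct folio *folio = page_folio(page);
	/*
	 * rmap_walk() now takes const struct rmap_walk_control *, so
	 * the control structure can be constructed at compile time:
	 * static const, no per-call stack initialisation, and no cast
	 * at the call site.
	 */
	static const struct rmap_walk_control rwc = {
		.rmap_one = page_idle_clear_pte_refs_one,	/* assumed callback */
		.anon_lock = folio_lock_anon_vma_read,		/* assumed callback */
	};
	bool need_lock;

	if (!folio_mapped(folio))	/* assumed early exit */
		return;

	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
	if (need_lock && !folio_trylock(folio))
		return;

	rmap_walk(folio, &rwc);

	if (need_lock)
		folio_unlock(folio);
}

Constifying the parameter also turns the commit message's claim into a
compiler-enforced contract: none of rmap_walk_ksm(), rmap_walk_anon()
or rmap_walk_file() may write through rwc, which is what makes sharing
a single static object across callers safe.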