@@ -890,14 +890,14 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
free_stable_node(stable_node);
}
-enum get_ksm_page_flags {
+enum get_ksm_folio_flags {
GET_KSM_PAGE_NOLOCK,
GET_KSM_PAGE_LOCK,
GET_KSM_PAGE_TRYLOCK
};
/*
- * get_ksm_page: checks if the page indicated by the stable node
+ * get_ksm_folio: checks if the page indicated by the stable node
* is still its ksm page, despite having held no reference to it.
* In which case we can trust the content of the page, and it
* returns the gotten page; but if the page has now been zapped,
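Note on the three flag values in the enum above: they select how the returned folio comes back locked. As a minimal userspace analogue (plain C with a pthread mutex standing in for the folio lock; all names here are invented for illustration, this is not kernel code):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum get_mode { GET_NOLOCK, GET_LOCK, GET_TRYLOCK };

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return 0 on success; -EBUSY if GET_TRYLOCK found the lock contended. */
static int get_object(enum get_mode mode)
{
	switch (mode) {
	case GET_NOLOCK:
		return 0;			/* reference only, no lock taken */
	case GET_LOCK:
		pthread_mutex_lock(&obj_lock);	/* may block, like folio_lock() */
		return 0;
	case GET_TRYLOCK:
		if (pthread_mutex_trylock(&obj_lock))
			return -EBUSY;		/* refuse to wait, report contention */
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	if (get_object(GET_TRYLOCK) == 0) {
		puts("trylock succeeded");
		pthread_mutex_unlock(&obj_lock);
	}
	return 0;
}

The TRYLOCK mode exists for callers that must not sleep while holding other locks; its -EBUSY path is what the ERR_PTR(-EBUSY) returns further down in this patch serve.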
@@ -915,12 +915,12 @@ enum get_ksm_page_flags {
* a page to put something that might look like our key in page->mapping.
*
- * Note: it is possible that get_ksm_page() will return NULL one moment,
+ * Note: it is possible that get_ksm_folio() will return NULL one moment,
* then page the next, if the page is in between page_freeze_refs() and
* page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static void *get_ksm_page(struct ksm_stable_node *stable_node,
- enum get_ksm_page_flags flags)
+static struct folio *get_ksm_folio(struct ksm_stable_node *stable_node,
+ enum get_ksm_folio_flags flags)
{
struct folio *folio;
void *expected_mapping;
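The trick the comment above describes is worth spelling out: instead of pinning the page with a long-lived reference, the lookup takes a speculative reference and then re-checks that the page still points back at the stable node. A compilable userspace sketch of that tryget-and-revalidate pattern, using C11 atomics; struct object, try_get and get_checked are illustrative stand-ins for the folio API, not kernel interfaces:

#include <stdatomic.h>
#include <stdio.h>

struct node { int id; };		/* stands in for the stable node */

struct object {				/* stands in for struct folio */
	atomic_int refcount;		/* 0 means already being freed */
	_Atomic(struct node *) owner;	/* stands in for folio->mapping */
};

/* Like folio_try_get(): take a reference only if one already exists. */
static int try_get(struct object *obj)
{
	int ref = atomic_load(&obj->refcount);

	while (ref > 0)
		if (atomic_compare_exchange_weak(&obj->refcount, &ref, ref + 1))
			return 1;
	return 0;
}

static void put(struct object *obj)
{
	atomic_fetch_sub(&obj->refcount, 1);
}

/*
 * Speculative lookup: take the reference first, then re-check that the
 * object still belongs to our key.  If it was zapped or reused, drop
 * the reference and return NULL (at which point ksm would also prune
 * the stale node from the stable tree).
 */
static struct object *get_checked(struct node *key, struct object *obj)
{
	if (!try_get(obj))
		return NULL;
	if (atomic_load(&obj->owner) != key) {
		put(obj);
		return NULL;
	}
	return obj;			/* caller must put() when done */
}

int main(void)
{
	struct node key = { 1 };
	struct object obj = { 1, &key };
	struct object *got = get_checked(&key, &obj);

	printf("lookup: %s\n", got ? "hit" : "stale");
	if (got)
		put(got);
	return 0;
}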
@@ -1001,7 +1001,7 @@ static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
struct folio *folio;
stable_node = rmap_item->head;
- folio = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
+ folio = get_ksm_folio(stable_node, GET_KSM_PAGE_LOCK);
if (!folio)
goto out;
@@ -1110,10 +1110,10 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
struct folio *folio;
int err;
- folio = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
+ folio = get_ksm_folio(stable_node, GET_KSM_PAGE_LOCK);
if (!folio) {
/*
- * get_ksm_page did remove_node_from_stable_tree itself.
+ * get_ksm_folio did remove_node_from_stable_tree itself.
*/
return 0;
}
@@ -1126,7 +1126,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
err = -EBUSY;
if (!folio_mapped(folio)) {
/*
- * The stable node did not yet appear stale to get_ksm_page(),
+ * The stable node did not yet appear stale to get_ksm_folio(),
* since that allows for an unmapped ksm folio to be recognized
* right up until it is freed; but the node is safe to remove.
* This folio might be in an LRU cache waiting to be freed,
@@ -1641,13 +1641,13 @@ static void *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
* We must walk all stable_node_dup to prune the stale
* stable nodes during lookup.
*
- * get_ksm_page can drop the nodes from the
+ * get_ksm_folio can drop the nodes from the
* stable_node->hlist if they point to freed pages
* (that's why we do a _safe walk). The "dup"
* stable_node parameter itself will be freed from
* under us if it returns NULL.
*/
- folio = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
+ folio = get_ksm_folio(dup, GET_KSM_PAGE_NOLOCK);
if (!folio)
continue;
nr += 1;
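The _safe walk the comment insists on is the classic delete-during-iteration idiom: fetch the next pointer before visiting the current node, so the current node can be freed without derailing the loop. A standalone model of the same shape (plain C with invented names; the kernel uses hlist_for_each_entry_safe() here):

#include <stdio.h>
#include <stdlib.h>

struct dup {
	int stale;			/* would be "page gone" in ksm */
	struct dup *next;
};

/*
 * Walk a list while deleting stale entries.  The "safe" part: save
 * cur->next *before* visiting cur, so freeing cur cannot corrupt
 * the traversal.
 */
static void prune(struct dup **head)
{
	struct dup **link = head;
	struct dup *cur, *next;

	for (cur = *head; cur; cur = next) {
		next = cur->next;	/* saved before cur can be freed */
		if (cur->stale) {
			*link = next;	/* unlink */
			free(cur);
		} else {
			link = &cur->next;
		}
	}
}

int main(void)
{
	struct dup *head = NULL;

	/* Build a 4-node list where every other node is stale. */
	for (int i = 0; i < 4; i++) {
		struct dup *d = malloc(sizeof(*d));
		d->stale = i & 1;
		d->next = head;
		head = d;
	}
	prune(&head);
	for (struct dup *d = head; d; d = d->next)
		printf("kept live node\n");
	return 0;
}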
@@ -1748,7 +1748,7 @@ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stabl
}
/*
- * Like for get_ksm_page, this function can free the *_stable_node and
+ * Like for get_ksm_folio, this function can free the *_stable_node and
* *_stable_node_dup if the returned tree_page is NULL.
*
* It can also free and overwrite *_stable_node with the found
@@ -1770,7 +1770,7 @@ static void *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
if (!is_stable_node_chain(stable_node)) {
if (is_page_sharing_candidate(stable_node)) {
*_stable_node_dup = stable_node;
- return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
+ return get_ksm_folio(stable_node, GET_KSM_PAGE_NOLOCK);
}
/*
* _stable_node_dup set to NULL means the stable_node
@@ -1877,14 +1877,14 @@ static void *stable_tree_search(struct page *page)
* write protected at all times. Any will work
* fine to continue the walk.
*/
- tree_folio = get_ksm_page(stable_node_any,
- GET_KSM_PAGE_NOLOCK);
+ tree_folio = get_ksm_folio(stable_node_any,
+ GET_KSM_PAGE_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
if (!tree_folio) {
/*
* If we walked over a stale stable_node,
- * get_ksm_page() will call rb_erase() and it
+ * get_ksm_folio() will call rb_erase() and it
* may rebalance the tree from under us. So
* restart the search from scratch. Returning
* NULL would be safe too, but we'd generate
@@ -1938,8 +1938,8 @@ static void *stable_tree_search(struct page *page)
* It would be more elegant to return stable_node
* than kpage, but that involves more changes.
*/
- tree_folio = get_ksm_page(stable_node_dup,
- GET_KSM_PAGE_TRYLOCK);
+ tree_folio = get_ksm_folio(stable_node_dup,
+ GET_KSM_PAGE_TRYLOCK);
if (PTR_ERR(tree_folio) == -EBUSY)
return ERR_PTR(-EBUSY);
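GET_KSM_PAGE_TRYLOCK is the one mode with two distinct failure cases, so the return value must distinguish "folio gone" (NULL) from "folio lock contended" (ERR_PTR(-EBUSY)). The kernel encodes small negative errnos in the unused top of the pointer space; a minimal userspace reimplementation of that convention (the real definitions live in include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* The last 4095 addresses are reserved for errno values. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A lookup that can succeed, miss, or fail with -EBUSY. */
static void *lookup(int state)
{
	static int value = 42;

	switch (state) {
	case 0: return &value;			/* hit */
	case 1: return NULL;			/* object gone */
	default: return ERR_PTR(-EBUSY);	/* lock contended */
	}
}

int main(void)
{
	for (int s = 0; s < 3; s++) {
		void *p = lookup(s);

		if (IS_ERR(p))
			printf("error %ld\n", PTR_ERR(p));
		else
			printf("%s\n", p ? "hit" : "miss");
	}
	return 0;
}

This is why the caller above compares PTR_ERR(tree_folio) against -EBUSY rather than only checking for NULL.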
@@ -2110,14 +2110,14 @@ static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
* write protected at all times. Any will work
* fine to continue the walk.
*/
- tree_folio = get_ksm_page(stable_node_any,
- GET_KSM_PAGE_NOLOCK);
+ tree_folio = get_ksm_folio(stable_node_any,
+ GET_KSM_PAGE_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
if (!tree_folio) {
/*
* If we walked over a stale stable_node,
- * get_ksm_page() will call rb_erase() and it
+ * get_ksm_folio() will call rb_erase() and it
* may rebalance the tree from under us. So
* restart the search from scratch. Returning
* NULL would be safe too, but we'd generate
@@ -2601,8 +2601,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
list_for_each_entry_safe(stable_node, next,
&migrate_nodes, list) {
- folio = get_ksm_page(stable_node,
- GET_KSM_PAGE_NOLOCK);
+ folio = get_ksm_folio(stable_node,
+ GET_KSM_PAGE_NOLOCK);
if (folio)
folio_put(folio);
cond_resched();
@@ -3229,7 +3229,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
/*
* newfolio->mapping was set in advance; now we need smp_wmb()
* to make sure that the new stable_node->kpfn is visible
- * to get_ksm_page() before it can see that folio->mapping
+ * to get_ksm_folio() before it can see that folio->mapping
* has gone stale (or that folio_test_swapcache has been cleared).
*/
smp_wmb();
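The barrier here is one half of a publish/subscribe pairing: write the payload (the new stable_node->kpfn), then order it before the old mapping can appear stale, so any reader that sees the stale mapping is guaranteed to also see the new kpfn. A userspace sketch of the same ordering using C11 release/acquire, which subsumes the smp_wmb()/smp_rmb() pairing (names invented for illustration, not the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* stands in for stable_node->kpfn */
static atomic_int published;		/* stands in for the mapping state */

static void *writer(void *arg)
{
	(void)arg;
	payload = 1234;
	/* Release ordering: the payload is visible before the flag
	 * flips -- the role smp_wmb() plays in folio_migrate_ksm(). */
	atomic_store_explicit(&published, 1, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* Acquire pairs with the writer's release (smp_rmb() analogue). */
	while (!atomic_load_explicit(&published, memory_order_acquire))
		;
	printf("saw payload %d\n", payload);	/* guaranteed 1234 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}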
@@ -3256,7 +3256,7 @@ static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
if (stable_node->kpfn >= start_pfn &&
stable_node->kpfn < end_pfn) {
/*
- * Don't get_ksm_page, page has already gone:
+ * Don't get_ksm_folio, page has already gone:
* which is why we keep kpfn instead of page*
*/
remove_node_from_stable_tree(stable_node);
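The half-open range test above is the whole policy: a stable node whose kpfn lies in [start_pfn, end_pfn) points into memory being removed, so it is pruned by pfn alone, never dereferencing the vanished struct page. As a tiny self-contained check (illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* [start_pfn, end_pfn): start inclusive, end exclusive. */
static bool pfn_in_range(unsigned long kpfn,
			 unsigned long start_pfn, unsigned long end_pfn)
{
	return kpfn >= start_pfn && kpfn < end_pfn;
}

int main(void)
{
	printf("%d %d\n", pfn_in_range(5, 0, 8), pfn_in_range(8, 0, 8));
	return 0;
}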
@@ -3344,7 +3344,7 @@ static int ksm_memory_callback(struct notifier_block *self,
* Most of the work is done by page migration; but there might
* be a few stable_nodes left over, still pointing to struct
* pages which have been offlined: prune those from the tree,
- * otherwise get_ksm_page() might later try to access a
+ * otherwise get_ksm_folio() might later try to access a
* non-existent struct page.
*/
ksm_check_stable_tree(mn->start_pfn,