Message ID | 20210521070855.2015094-1-linmiaohe@huawei.com (mailing list archive)
---|---
State | New, archived
Series | mm/swapfile: move scan_swap_map() under CONFIG_HIBERNATION
Please just fold it into the only caller and update the comments
referring to it. Something like:

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 149e77454e3c..0e575b45bb87 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -452,10 +452,10 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si,
 		unsigned int idx)
 {
 	/*
-	 * If scan_swap_map() can't find a free cluster, it will check
+	 * If scan_swap_map_slots() can't find a free cluster, it will check
 	 * si->swap_map directly. To make sure the discarding cluster isn't
-	 * taken by scan_swap_map(), mark the swap entries bad (occupied). It
-	 * will be cleared after discard
+	 * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
+	 * It will be cleared after discard
 	 */
 	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
 			SWAP_MAP_BAD, SWAPFILE_CLUSTER);
@@ -580,7 +580,7 @@ static void dec_cluster_info_page(struct swap_info_struct *p,
 }
 
 /*
- * It's possible scan_swap_map() uses a free cluster in the middle of free
+ * It's possible scan_swap_map_slots() uses a free cluster in the middle of free
  * cluster list. Avoiding such abuse to avoid list corruption.
  */
 static bool
@@ -1028,21 +1028,6 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 	swap_range_free(si, offset, SWAPFILE_CLUSTER);
 }
 
-static unsigned long scan_swap_map(struct swap_info_struct *si,
-				   unsigned char usage)
-{
-	swp_entry_t entry;
-	int n_ret;
-
-	n_ret = scan_swap_map_slots(si, usage, 1, &entry);
-
-	if (n_ret)
-		return swp_offset(entry);
-	else
-		return 0;
-
-}
-
 int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 {
 	unsigned long size = swap_entry_size(entry_size);
@@ -1105,14 +1090,14 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 nextsi:
 		/*
 		 * if we got here, it's likely that si was almost full before,
-		 * and since scan_swap_map() can drop the si->lock, multiple
-		 * callers probably all tried to get a page from the same si
-		 * and it filled up before we could get one; or, the si filled
-		 * up between us dropping swap_avail_lock and taking si->lock.
-		 * Since we dropped the swap_avail_lock, the swap_avail_head
-		 * list may have been modified; so if next is still in the
-		 * swap_avail_head list then try it, otherwise start over
-		 * if we have not gotten any slots.
+		 * and since scan_swap_map_slots() can drop the si->lock,
+		 * multiple callers probably all tried to get a page from the
+		 * same si and it filled up before we could get one; or, the si
+		 * filled up between us dropping swap_avail_lock and taking
+		 * si->lock. Since we dropped the swap_avail_lock, the
+		 * swap_avail_head list may have been modified; so if next is
+		 * still in the swap_avail_head list then try it, otherwise
+		 * start over if we have not gotten any slots.
 		 */
 		if (plist_node_empty(&next->avail_lists[node]))
 			goto start_over;
@@ -1132,24 +1117,18 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 swp_entry_t get_swap_page_of_type(int type)
 {
 	struct swap_info_struct *si = swap_type_to_swap_info(type);
-	pgoff_t offset;
+	swp_entry_t entry = { };
 
 	if (!si)
 		goto fail;
 
+	/* This is called for allocating swap entry, not cache */
 	spin_lock(&si->lock);
-	if (si->flags & SWP_WRITEOK) {
-		/* This is called for allocating swap entry, not cache */
-		offset = scan_swap_map(si, 1);
-		if (offset) {
-			atomic_long_dec(&nr_swap_pages);
-			spin_unlock(&si->lock);
-			return swp_entry(type, offset);
-		}
-	}
+	if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry))
+		atomic_long_dec(&nr_swap_pages);
 	spin_unlock(&si->lock);
 fail:
-	return (swp_entry_t) {0};
+	return entry;
 }
 
 static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
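A note on why the folded call can sit directly in the if () condition: as the deleted wrapper shows, scan_swap_map_slots() takes the requested slot count plus a caller-supplied output array, and returns how many slots it actually allocated. A minimal sketch of a single-slot caller under that convention; alloc_one_swap_entry() is a hypothetical helper for illustration, not a function in mm/swapfile.c:

/*
 * Sketch only: alloc_one_swap_entry() is hypothetical, showing the
 * scan_swap_map_slots() calling convention the diff above relies on.
 */
static bool alloc_one_swap_entry(struct swap_info_struct *si,
				 swp_entry_t *entry)
{
	/* usage == 1: allocate a swap entry (not swap cache);
	 * n_goal == 1: ask for a single slot. */
	int n_ret = scan_swap_map_slots(si, 1, 1, entry);

	/* n_ret is the number of slots filled, 0 or 1 here, so the
	 * return value doubles as a success flag. */
	return n_ret == 1;
}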
On 2021/5/24 20:06, Christoph Hellwig wrote:
> Please just fold it into the only caller and update the comments
> referring to it. Something like:

Sounds reasonable. Many thanks for your suggestion! Will do it in the next version.

> [snip; the quoted diff is shown in full above]
Move scan_swap_map() under CONFIG_HIBERNATION, since its only caller,
get_swap_page_of_type(), is also under CONFIG_HIBERNATION. This also
fixes the unused-function warning for scan_swap_map() when
CONFIG_HIBERNATION is not set.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
---
Hi Andrew,
Please feel free to merge this with
mm-swapfile-move-get_swap_page_of_type-under-config_hibernation.patch
---
 mm/swapfile.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/mm/swapfile.c b/mm/swapfile.c
index 4e7628b82e63..53d2161edfc0 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1037,21 +1037,6 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
 	swap_range_free(si, offset, SWAPFILE_CLUSTER);
 }
 
-static unsigned long scan_swap_map(struct swap_info_struct *si,
-				   unsigned char usage)
-{
-	swp_entry_t entry;
-	int n_ret;
-
-	n_ret = scan_swap_map_slots(si, usage, 1, &entry);
-
-	if (n_ret)
-		return swp_offset(entry);
-	else
-		return 0;
-
-}
-
 int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
 {
 	unsigned long size = swap_entry_size(entry_size);
@@ -1789,6 +1774,21 @@ int free_swap_and_cache(swp_entry_t entry)
 
 #ifdef CONFIG_HIBERNATION
 
+static unsigned long scan_swap_map(struct swap_info_struct *si,
+				   unsigned char usage)
+{
+	swp_entry_t entry;
+	int n_ret;
+
+	n_ret = scan_swap_map_slots(si, usage, 1, &entry);
+
+	if (n_ret)
+		return swp_offset(entry);
+	else
+		return 0;
+
+}
+
 swp_entry_t get_swap_page_of_type(int type)
 {
 	struct swap_info_struct *si = swap_type_to_swap_info(type);
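For readers unfamiliar with the warning the changelog mentions: gcc's -Wunused-function fires for a static function that ends up with no callers in a given configuration. A self-contained sketch of the pattern and the fix, where CONFIG_EXAMPLE_FEATURE and both functions are hypothetical stand-ins (CONFIG_EXAMPLE_FEATURE plays the role of CONFIG_HIBERNATION):

/* Before: helper() is always compiled, but its only caller is not. */
static int helper(void)
{
	return 42;
}

#ifdef CONFIG_EXAMPLE_FEATURE
int feature_entry(void)
{
	return helper();	/* only caller */
}
#endif

/*
 * With CONFIG_EXAMPLE_FEATURE unset, helper() is defined but never
 * used, so gcc warns. Moving the definition of helper() inside the
 * same #ifdef block, as the patch does for scan_swap_map(), removes
 * the warning without changing behavior for configured-in builds.
 */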