Message ID | 20220330102534.1053240-2-chenwandun@huawei.com
---|---
State | New
Series | fix several contiguous memmap assumptions
On 30.03.22 12:25, Chen Wandun wrote:
> For SPARSEMEM configs without VMEMMAP, it isn't valid to assume that a
> compound page has virtually contiguous page structs, so use nth_page()
> to iterate over each page.

Is this actually a "fix", or rather a preparation for having very large
compound pages (>= MAX_ORDER) that we'd be able to split? Naive me would
think that we'd currently only have order < MAX_ORDER, and consequently
would always fall into a single memory section, where the memmap is
contiguous.

>
> Inspired by:
> https://lore.kernel.org/linux-mm/20220204195852.1751729-8-willy@infradead.org/
>
> Signed-off-by: Chen Wandun <chenwandun@huawei.com>
> ---
>  mm/compaction.c  | 6 +++---
>  mm/huge_memory.c | 2 +-
>  mm/page_alloc.c  | 2 +-
>  3 files changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/mm/compaction.c b/mm/compaction.c
> index c3e37aa9ff9e..ddff13b968a2 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -87,7 +87,7 @@ static unsigned long release_freepages(struct list_head *freelist)
>  static void split_map_pages(struct list_head *list)
>  {
>  	unsigned int i, order, nr_pages;
> -	struct page *page, *next;
> +	struct page *page, *next, *tmp;
>  	LIST_HEAD(tmp_list);
>
>  	list_for_each_entry_safe(page, next, list, lru) {
> @@ -101,8 +101,8 @@ static void split_map_pages(struct list_head *list)
>  		split_page(page, order);
>
>  		for (i = 0; i < nr_pages; i++) {
> -			list_add(&page->lru, &tmp_list);
> -			page++;
> +			tmp = nth_page(page, i);
> +			list_add(&tmp->lru, &tmp_list);
>  		}
>  	}
>
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 2fe38212e07c..d77fc2ad581d 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2297,7 +2297,7 @@ static void lru_add_page_tail(struct page *head, struct page *tail,
>  static void __split_huge_page_tail(struct page *head, int tail,
>  		struct lruvec *lruvec, struct list_head *list)
>  {
> -	struct page *page_tail = head + tail;
> +	struct page *page_tail = nth_page(head, tail);
>
>  	VM_BUG_ON_PAGE(atomic_read(&page_tail->_mapcount) != -1, page_tail);
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index f648decfe39d..855211dea13e 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -3513,7 +3513,7 @@ void split_page(struct page *page, unsigned int order)
>  	VM_BUG_ON_PAGE(!page_count(page), page);
>
>  	for (i = 1; i < (1 << order); i++)
> -		set_page_refcounted(page + i);
> +		set_page_refcounted(nth_page(page, i));
>  	split_page_owner(page, 1 << order);
>  	split_page_memcg(page, 1 << order);
>  }
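For context, both the commit message's premise and the question above come down to two definitions in the kernel headers of that era (excerpted below for reference, circa v5.17; they are not part of the patch): nth_page() only differs from plain pointer arithmetic on SPARSEMEM kernels without VMEMMAP, where the memmap is virtually contiguous only within a memory section, and a build-time assertion guarantees that a buddy allocation of order < MAX_ORDER always fits inside a single section.

/* include/linux/mm.h (circa v5.17): nth_page() is plain pointer
 * arithmetic unless the kernel is built with SPARSEMEM but without
 * SPARSEMEM_VMEMMAP, in which case page structs are only virtually
 * contiguous within one memory section. */
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
#else
#define nth_page(page,n) ((page) + (n))
#endif

/* include/linux/mmzone.h (circa v5.17): the largest buddy order must
 * fit within a single section, so any order < MAX_ORDER page has a
 * contiguous memmap today. */
#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

On x86-64, for example, SECTION_SIZE_BITS is 27 (128 MiB sections) while the largest buddy allocation is 4 MiB (order 10 with 4 KiB pages), which supports the reading that on current kernels such splits never cross a section boundary, making the patch a preparation rather than a fix.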