[v5,16/21] mm/hugetlb: Flush work when dissolving hugetlb page

Message ID 20201120064325.34492-17-songmuchun@bytedance.com
State New, archived
Series Free some vmemmap pages of hugetlb page

Commit Message

Muchun Song Nov. 20, 2020, 6:43 a.m. UTC
We should flush work when dissolving a hugetlb page to make sure that
the hugetlb page is freed to the buddy.

Signed-off-by: Muchun Song <songmuchun@bytedance.com>
---
 mm/hugetlb.c | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)
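
[For readers landing on this patch in isolation: an earlier patch in the
series defers the expensive part of freeing a HugeTLB page (remapping its
vmemmap and releasing the page to the buddy allocator) to the work item
hpage_update_work. The following is a minimal sketch of that deferred-free
pattern and of the ordering guarantee flush_work() provides; everything
except the workqueue API itself is illustrative, not the series' exact
code.

#include <linux/workqueue.h>

static void deferred_free_workfn(struct work_struct *work)
{
	/* Remap the vmemmap and hand the HugeTLB page back to the buddy. */
}
static DECLARE_WORK(deferred_free_work, deferred_free_workfn);

/* Producer side: hand the expensive free off to a kworker. */
static void queue_deferred_free(void)
{
	schedule_work(&deferred_free_work);
}

/*
 * Consumer-side guarantee: flush_work() does not return until the last
 * queueing instance of the work item has finished executing, so every
 * page queued before this call has reached the buddy allocator by the
 * time it returns.
 */
static void wait_for_deferred_free(void)
{
	flush_work(&deferred_free_work);
}
]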

Comments

Michal Hocko Nov. 20, 2020, 8:20 a.m. UTC | #1
On Fri 20-11-20 14:43:20, Muchun Song wrote:
> We should flush work when dissolving a hugetlb page to make sure that
> the hugetlb page is freed to the buddy.

Why? This explanation on its own doesn't really help to understand what
the point of the patch is.

Patch

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b853aacd5c16..9aad0b63d369 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1328,6 +1328,12 @@ static void update_hpage_vmemmap_workfn(struct work_struct *work)
 }
 static DECLARE_WORK(hpage_update_work, update_hpage_vmemmap_workfn);
 
+static inline void flush_hpage_update_work(struct hstate *h)
+{
+	if (free_vmemmap_pages_per_hpage(h))
+		flush_work(&hpage_update_work);
+}
+
 static inline void __update_and_free_page(struct hstate *h, struct page *page)
 {
 	/* No need to allocate vmemmap pages */
@@ -1928,6 +1934,7 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 int dissolve_free_huge_page(struct page *page)
 {
 	int rc = -EBUSY;
+	struct hstate *h = NULL;
 
 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
 	if (!PageHuge(page))
@@ -1941,8 +1948,9 @@ int dissolve_free_huge_page(struct page *page)
 
 	if (!page_count(page)) {
 		struct page *head = compound_head(page);
-		struct hstate *h = page_hstate(head);
 		int nid = page_to_nid(head);
+
+		h = page_hstate(head);
 		if (h->free_huge_pages - h->resv_huge_pages == 0)
 			goto out;
 
@@ -1956,6 +1964,14 @@ int dissolve_free_huge_page(struct page *page)
 	}
 out:
 	spin_unlock(&hugetlb_lock);
+
+	/*
+	 * We should flush work before return to make sure that
+	 * the HugeTLB page is freed to the buddy.
+	 */
+	if (!rc && h)
+		flush_hpage_update_work(h);
+
 	return rc;
 }
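
[Why flushing matters here: without it, dissolve_free_huge_page() can
return 0 while the actual free is still queued, yet callers treat a
successful dissolve as "this PFN range now contains only buddy pages".
Below is a sketch of such a caller, modeled loosely on the kernel's
dissolve_free_huge_pages() as used by memory offlining; the loop step and
helper usage are illustrative, not verbatim kernel code.

/*
 * Illustrative range-dissolving caller. On success the caller assumes
 * every PFN in [start_pfn, end_pfn) backs a buddy page, an assumption
 * that only holds once the deferred vmemmap work has been flushed.
 */
static int dissolve_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	int rc = 0;

	/* Step by the smallest supported huge page size. */
	for (pfn = start_pfn; !rc && pfn < end_pfn; pfn += 1 << minimum_order)
		rc = dissolve_free_huge_page(pfn_to_page(pfn));

	return rc;
}

Note also that the flush is conditional: flush_hpage_update_work() only
calls flush_work() when free_vmemmap_pages_per_hpage(h) is non-zero,
i.e. when frees for that hstate actually go through the workqueue; for
other hstates nothing is queued, so there is nothing to wait for.]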