[v3,13/18] mm/memcg: Add folio_memcg_lock() and folio_memcg_unlock()

Message ID 20210630040034.1155892-14-willy@infradead.org (mailing list archive)
State New
Series Folio conversion of memcg

Commit Message

Matthew Wilcox (Oracle) June 30, 2021, 4 a.m. UTC
These are the folio equivalents of lock_page_memcg() and
unlock_page_memcg().  Reimplement lock_page_memcg() and
unlock_page_memcg() as wrappers around the folio versions.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/memcontrol.h | 10 +++++++++
 mm/memcontrol.c            | 45 ++++++++++++++++++++++++--------------
 2 files changed, 39 insertions(+), 16 deletions(-)
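
In use, the new pair nests the same way as the page-based calls it
replaces.  A minimal sketch, using only the interfaces declared by this
patch plus page_folio() from the prerequisite folio series:

	struct folio *folio = page_folio(page);

	folio_memcg_lock(folio);
	/* the folio's memcg binding cannot be changed until we unlock */
	folio_memcg_unlock(folio);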

Comments

Michal Hocko June 30, 2021, 8:32 a.m. UTC | #1
On Wed 30-06-21 05:00:29, Matthew Wilcox wrote:
> These are the folio equivalents of lock_page_memcg() and
> unlock_page_memcg().  Reimplement lock_page_memcg() and
> unlock_page_memcg() as wrappers around the folio versions.

Is there any reason why you haven't followed the same approach as in
the previous patches?  I mean, callers can call page_folio() themselves,
and then the lock_page_memcg() wrapper shouldn't really be needed.

I do not really want to be annoying here, but I have to say that I like
the conversion done by the previous patches much better than this
wrapper approach, as I already mentioned during the previous review.  If
you have a reason to stick with this approach for this particular case,
then make it explicit in the changelog.

[...]
Matthew Wilcox (Oracle) July 7, 2021, 3:10 p.m. UTC | #2
On Wed, Jun 30, 2021 at 10:32:02AM +0200, Michal Hocko wrote:
> On Wed 30-06-21 05:00:29, Matthew Wilcox wrote:
> > These are the folio equivalents of lock_page_memcg() and
> > unlock_page_memcg().  Reimplement lock_page_memcg() and
> > unlock_page_memcg() as wrappers around the folio versions.
> 
> Is there any reason why you haven't followed the same approach as in
> the previous patches?  I mean, callers can call page_folio() themselves,
> and then the lock_page_memcg() wrapper shouldn't really be needed.

At this point in the patch series there are ~20 places that call
lock_page_memcg().  I think it makes more sense to leave the wrapper
in place, and then remove it once all or most of these places are
converted to use folios.  There are another 5 conversions already in
the patch series, e.g. here:

https://git.infradead.org/users/willy/pagecache.git/commitdiff/a41c942c8e4b41df30be128ef6998ff1849fa36a
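
Such a conversion is mechanical.  A sketch of the before/after shape at
a hypothetical call site (illustrative only; not taken from the linked
commit):

	/* Before: a page-based caller goes through the wrapper. */
	lock_page_memcg(page);
	/* ... modify state that must not race with a cgroup move ... */
	unlock_page_memcg(page);

	/* After: resolve the folio once, then use it directly. */
	struct folio *folio = page_folio(page);

	folio_memcg_lock(folio);
	/* ... same critical section ... */
	folio_memcg_unlock(folio);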

> I do not really want to be annoying here, but I have to say that I like
> the conversion done by the previous patches much better than this
> wrapper approach, as I already mentioned during the previous review.  If
> you have a reason to stick with this approach for this particular case,
> then make it explicit in the changelog.

OK, I can point to the number of callers as a reason to keep the
wrappers in place.  I intended to just do the conversion here, but
seeing the number of callers made me reconsider.
Johannes Weiner July 7, 2021, 5:08 p.m. UTC | #3
On Wed, Jun 30, 2021 at 05:00:29AM +0100, Matthew Wilcox (Oracle) wrote:
> -static void __unlock_page_memcg(struct mem_cgroup *memcg)
> +static void __memcg_unlock(struct mem_cgroup *memcg)

This is too generic a name. There are several locks in the memcg, and
this one only locks the page->memcg bindings in the group.
Matthew Wilcox (Oracle) July 7, 2021, 7:28 p.m. UTC | #4
On Wed, Jul 07, 2021 at 01:08:51PM -0400, Johannes Weiner wrote:
> On Wed, Jun 30, 2021 at 05:00:29AM +0100, Matthew Wilcox (Oracle) wrote:
> > -static void __unlock_page_memcg(struct mem_cgroup *memcg)
> > +static void __memcg_unlock(struct mem_cgroup *memcg)
> 
> This is too generic a name. There are several locks in the memcg, and
> this one only locks the page->memcg bindings in the group.

Fair.  __memcg_move_unlock looks like the right name to me?
Johannes Weiner July 7, 2021, 8:41 p.m. UTC | #5
On Wed, Jul 07, 2021 at 08:28:39PM +0100, Matthew Wilcox wrote:
> On Wed, Jul 07, 2021 at 01:08:51PM -0400, Johannes Weiner wrote:
> > On Wed, Jun 30, 2021 at 05:00:29AM +0100, Matthew Wilcox (Oracle) wrote:
> > > -static void __unlock_page_memcg(struct mem_cgroup *memcg)
> > > +static void __memcg_unlock(struct mem_cgroup *memcg)
> > 
> > This is too generic a name. There are several locks in the memcg, and
> > this one only locks the page->memcg bindings in the group.
> 
> Fair.  __memcg_move_unlock looks like the right name to me?

Could you please elaborate on what the problem with the current name is?

mem_cgroup_move_account() does this:

	lock_page_memcg(page);
	page->memcg = to;
	__unlock_page_memcg(from);

It locks and unlocks the page->memcg binding, which can be done coming
from either the page or the memcg.  The current names are symmetrical to
reflect that it's the same lock.

We could switch them both to move_lock, but as per the other email,
lock_page_memcg() was chosen to resemble lock_page(), because from a
memcg POV they're interchangeable - the former is just a more narrowly
scoped version for contexts that don't hold the page lock.  It used to
be called something else, and we had several contexts taking redundant
locks by accident because this hierarchy wasn't clear.

I don't mind fixing poorly chosen or misleading naming schemes, but I
think we need better explanations to overcome the reasoning behind the
existing names, not just the assumption that there weren't any.
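
For reference, the pattern being described, as it reads after this
patch's rename (simplified from the mem_cgroup_move_account() hunk in
the diff below):

	lock_page_memcg(page);			/* pins the page->memcg binding */
	page->memcg_data = (unsigned long)to;	/* rebind the page */
	__memcg_unlock(from);			/* drops the same move_lock */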
Michal Hocko July 8, 2021, 7:28 a.m. UTC | #6
On Wed 07-07-21 16:10:49, Matthew Wilcox wrote:
[...]
> > I do not really want to be annoying here, but I have to say that I like
> > the conversion done by the previous patches much better than this
> > wrapper approach, as I already mentioned during the previous review.  If
> > you have a reason to stick with this approach for this particular case,
> > then make it explicit in the changelog.
> 
> OK, I can point to the number of callers as a reason to keep the
> wrappers in place.  I intended to just do the conversion here, but
> seeing the number of callers made me reconsider.

OK, fair enough.  My worry is that we will have this lingering for way
too long; people simply tend to copy code...  Anyway, please at least
add a comment warning that the wrapper shouldn't be used in any new
code.

Thanks!
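
One possible shape for that warning, layered on the wrapper this patch
adds (the comment text is illustrative, not part of the posted series):

	/*
	 * Do not use in new code: call folio_memcg_lock(page_folio(page))
	 * directly instead.  This wrapper only exists until the remaining
	 * callers have been converted to folios.
	 */
	void lock_page_memcg(struct page *page)
	{
		folio_memcg_lock(page_folio(page));
	}
	EXPORT_SYMBOL(lock_page_memcg);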
Matthew Wilcox (Oracle) July 9, 2021, 7:37 p.m. UTC | #7
On Wed, Jul 07, 2021 at 04:41:05PM -0400, Johannes Weiner wrote:
> On Wed, Jul 07, 2021 at 08:28:39PM +0100, Matthew Wilcox wrote:
> > On Wed, Jul 07, 2021 at 01:08:51PM -0400, Johannes Weiner wrote:
> > > On Wed, Jun 30, 2021 at 05:00:29AM +0100, Matthew Wilcox (Oracle) wrote:
> > > > -static void __unlock_page_memcg(struct mem_cgroup *memcg)
> > > > +static void __memcg_unlock(struct mem_cgroup *memcg)
> > > 
> > > This is too generic a name. There are several locks in the memcg, and
> > > this one only locks the page->memcg bindings in the group.
> > 
> > Fair.  __memcg_move_unlock looks like the right name to me?
> 
> Could you please elaborate on what the problem with the current name is?
> 
> mem_cgroup_move_account() does this:
> 
> 	lock_page_memcg(page);
> 	page->memcg = to;
> 	__unlock_page_memcg(from);
> 
> It locks and unlocks the page->memcg binding, which can be done coming
> from either the page or the memcg.  The current names are symmetrical to
> reflect that it's the same lock.

OK, so in the prerequisite series to this patch, lock_page() becomes
folio_lock().  This series turns lock_page_memcg() into
folio_memcg_lock().  As a minimum, then, this needs to turn into
__folio_memcg_unlock().

> We could switch them both to move_lock, but as per the other email,
> lock_page_memcg() was chosen to resemble lock_page(), because from a
> memcg POV they're interchangeable - the former is just a more narrowly
> scoped version for contexts that don't hold the page lock.  It used to
> be called something else, and we had several contexts taking redundant
> locks by accident because this hierarchy wasn't clear.

Unfortunately, it's still not clear.  I've answered questions from
people who think that they have the page locked because they called
lock_page_memcg() ;-(
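
Summarized, the naming scheme being applied here (the last entry is the
rename proposed above; the posted patch still uses __memcg_unlock()):

	lock_page(page)             ->  folio_lock(folio)
	lock_page_memcg(page)       ->  folio_memcg_lock(folio)
	__unlock_page_memcg(memcg)  ->  __folio_memcg_unlock(memcg)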

Patch

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ef79f9c0b296..279ea2640365 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -951,6 +951,8 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 extern bool cgroup_memory_noswap;
 #endif
 
+void folio_memcg_lock(struct folio *folio);
+void folio_memcg_unlock(struct folio *folio);
 void lock_page_memcg(struct page *page);
 void unlock_page_memcg(struct page *page);
 
@@ -1363,6 +1365,14 @@ static inline void unlock_page_memcg(struct page *page)
 {
 }
 
+static inline void folio_memcg_lock(struct folio *folio)
+{
+}
+
+static inline void folio_memcg_unlock(struct folio *folio)
+{
+}
+
 static inline void mem_cgroup_handle_over_high(void)
 {
 }
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b925bdce0c6e..b94a6122f27d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1960,18 +1960,17 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
 }
 
 /**
- * lock_page_memcg - lock a page and memcg binding
- * @page: the page
+ * folio_memcg_lock - Bind a folio to its memcg.
+ * @folio: The folio.
  *
- * This function protects unlocked LRU pages from being moved to
+ * This function prevents unlocked LRU folios from being moved to
  * another cgroup.
  *
- * It ensures lifetime of the locked memcg. Caller is responsible
- * for the lifetime of the page.
+ * It ensures lifetime of the bound memcg.  The caller is responsible
+ * for the lifetime of the folio.
  */
-void lock_page_memcg(struct page *page)
+void folio_memcg_lock(struct folio *folio)
 {
-	struct page *head = compound_head(page); /* rmap on tail pages */
 	struct mem_cgroup *memcg;
 	unsigned long flags;
 
@@ -1985,7 +1984,7 @@ void lock_page_memcg(struct page *page)
 	if (mem_cgroup_disabled())
 		return;
 again:
-	memcg = page_memcg(head);
+	memcg = folio_memcg(folio);
 	if (unlikely(!memcg))
 		return;
 
@@ -1999,7 +1998,7 @@ void lock_page_memcg(struct page *page)
 		return;
 
 	spin_lock_irqsave(&memcg->move_lock, flags);
-	if (memcg != page_memcg(head)) {
+	if (memcg != folio_memcg(folio)) {
 		spin_unlock_irqrestore(&memcg->move_lock, flags);
 		goto again;
 	}
@@ -2013,9 +2012,15 @@ void lock_page_memcg(struct page *page)
 	memcg->move_lock_task = current;
 	memcg->move_lock_flags = flags;
 }
+EXPORT_SYMBOL(folio_memcg_lock);
+
+void lock_page_memcg(struct page *page)
+{
+	folio_memcg_lock(page_folio(page));
+}
 EXPORT_SYMBOL(lock_page_memcg);
 
-static void __unlock_page_memcg(struct mem_cgroup *memcg)
+static void __memcg_unlock(struct mem_cgroup *memcg)
 {
 	if (memcg && memcg->move_lock_task == current) {
 		unsigned long flags = memcg->move_lock_flags;
@@ -2030,14 +2035,22 @@ static void __unlock_page_memcg(struct mem_cgroup *memcg)
 }
 
 /**
- * unlock_page_memcg - unlock a page and memcg binding
- * @page: the page
+ * folio_memcg_unlock - Release the binding between a folio and its memcg.
+ * @folio: The folio.
+ *
+ * This releases the binding created by folio_memcg_lock().  This does
+ * not change the accounting of this folio to its memcg, but it does
+ * permit others to change it.
  */
-void unlock_page_memcg(struct page *page)
+void folio_memcg_unlock(struct folio *folio)
 {
-	struct page *head = compound_head(page);
+	__memcg_unlock(folio_memcg(folio));
+}
+EXPORT_SYMBOL(folio_memcg_unlock);
 
-	__unlock_page_memcg(page_memcg(head));
+void unlock_page_memcg(struct page *page)
+{
+	folio_memcg_unlock(page_folio(page));
 }
 EXPORT_SYMBOL(unlock_page_memcg);
 
@@ -5661,7 +5674,7 @@ static int mem_cgroup_move_account(struct page *page,
 
 	page->memcg_data = (unsigned long)to;
 
-	__unlock_page_memcg(from);
+	__memcg_unlock(from);
 
 	ret = 0;
 	nid = page_to_nid(page);