
[4/4] mm: shrinker: make shrinker not depend on memcg kmem

Message ID 1559887659-23121-5-git-send-email-yang.shi@linux.alibaba.com (mailing list archive)
State New, archived
Series: Make deferred split shrinker memcg aware

Commit Message

Yang Shi June 7, 2019, 6:07 a.m. UTC
Currently, a shrinker is only allocated and able to work when memcg kmem is
enabled.  But the THP deferred split shrinker is not a slab shrinker, so it
doesn't make much sense to have it depend on memcg kmem.  It should be able
to reclaim THP even when memcg kmem is disabled.

Introduce a new shrinker flag, SHRINKER_NONSLAB, for non-slab shrinkers,
i.e. the THP deferred split shrinker.  When memcg kmem is disabled, only
such shrinkers can be called when shrinking memcg slab.

Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Yang Shi <yang.shi@linux.alibaba.com>
---
 include/linux/shrinker.h |  3 +--
 mm/huge_memory.c         |  3 ++-
 mm/vmscan.c              | 27 ++++++---------------------
 3 files changed, 9 insertions(+), 24 deletions(-)
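
As an illustration of the new flag, below is a minimal sketch of how a
non-slab, memcg-aware shrinker would be registered, mirroring what the
huge_memory.c hunk in this patch does for the deferred split shrinker.  The
example_* names and the callback bodies are hypothetical and not part of
this patch; it assumes the register_shrinker() interface as it exists at
the time of this series.

#include <linux/init.h>
#include <linux/shrinker.h>

/*
 * Hypothetical count callback: report how many objects could be reclaimed
 * for the node/memcg described by sc.
 */
static unsigned long example_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	return 0;	/* nothing to reclaim in this sketch */
}

/*
 * Hypothetical scan callback: reclaim up to sc->nr_to_scan objects and
 * return the number actually freed.
 */
static unsigned long example_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
	/*
	 * SHRINKER_NONSLAB: keep calling this shrinker from
	 * shrink_slab_memcg() even when memcg kmem is disabled.
	 */
	.flags		= SHRINKER_MEMCG_AWARE | SHRINKER_NONSLAB,
};

static int __init example_shrinker_init(void)
{
	return register_shrinker(&example_shrinker);
}

With the vmscan.c change below, shrink_slab_memcg() skips every shrinker
that lacks SHRINKER_NONSLAB when memcg_kmem_enabled() is false, so only
shrinkers registered like this still run in that configuration.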

Comments

Kirill A. Shutemov June 12, 2019, 2:52 a.m. UTC | #1
On Fri, Jun 07, 2019 at 02:07:39PM +0800, Yang Shi wrote:
> Currently, a shrinker is only allocated and able to work when memcg kmem is
> enabled.  But the THP deferred split shrinker is not a slab shrinker, so it
> doesn't make much sense to have it depend on memcg kmem.  It should be able
> to reclaim THP even when memcg kmem is disabled.
> 
> Introduce a new shrinker flag, SHRINKER_NONSLAB, for non-slab shrinkers,
> i.e. the THP deferred split shrinker.  When memcg kmem is disabled, only
> such shrinkers can be called when shrinking memcg slab.

Looks like it breaks bisectability. It has to be done before making the
shrinker memcg-aware, doesn't it?
Yang Shi June 12, 2019, 5:07 a.m. UTC | #2
On 6/11/19 7:52 PM, Kirill A. Shutemov wrote:
> On Fri, Jun 07, 2019 at 02:07:39PM +0800, Yang Shi wrote:
>> Currently, a shrinker is only allocated and able to work when memcg kmem is
>> enabled.  But the THP deferred split shrinker is not a slab shrinker, so it
>> doesn't make much sense to have it depend on memcg kmem.  It should be able
>> to reclaim THP even when memcg kmem is disabled.
>>
>> Introduce a new shrinker flag, SHRINKER_NONSLAB, for non-slab shrinkers,
>> i.e. the THP deferred split shrinker.  When memcg kmem is disabled, only
>> such shrinkers can be called when shrinking memcg slab.
> Looks like it breaks bisectability. It has to be done before making the
> shrinker memcg-aware, doesn't it?

No, it doesn't break bisectability. But without this patch the THP shrinker 
can only be called when kmem charging is enabled.

>
Kirill A. Shutemov June 12, 2019, 10:11 a.m. UTC | #3
On Tue, Jun 11, 2019 at 10:07:54PM -0700, Yang Shi wrote:
> 
> 
> On 6/11/19 7:52 PM, Kirill A. Shutemov wrote:
> > On Fri, Jun 07, 2019 at 02:07:39PM +0800, Yang Shi wrote:
> > > Currently, a shrinker is only allocated and able to work when memcg kmem is
> > > enabled.  But the THP deferred split shrinker is not a slab shrinker, so it
> > > doesn't make much sense to have it depend on memcg kmem.  It should be able
> > > to reclaim THP even when memcg kmem is disabled.
> > > 
> > > Introduce a new shrinker flag, SHRINKER_NONSLAB, for non-slab shrinkers,
> > > i.e. the THP deferred split shrinker.  When memcg kmem is disabled, only
> > > such shrinkers can be called when shrinking memcg slab.
> > Looks like it breaks bisectability. It has to be done before making the
> > shrinker memcg-aware, doesn't it?
> 
> No, it doesn't break bisectability. But without this patch the THP shrinker
> can only be called when kmem charging is enabled.

So, if kmem is disabled, it will not be called, right? Then it is a
regression in my opinion. This patch has to go in before 2/4.
Yang Shi June 12, 2019, 5:20 p.m. UTC | #4
On 6/12/19 3:11 AM, Kirill A. Shutemov wrote:
> On Tue, Jun 11, 2019 at 10:07:54PM -0700, Yang Shi wrote:
>>
>> On 6/11/19 7:52 PM, Kirill A. Shutemov wrote:
>>> On Fri, Jun 07, 2019 at 02:07:39PM +0800, Yang Shi wrote:
>>>> Currently, a shrinker is only allocated and able to work when memcg kmem is
>>>> enabled.  But the THP deferred split shrinker is not a slab shrinker, so it
>>>> doesn't make much sense to have it depend on memcg kmem.  It should be able
>>>> to reclaim THP even when memcg kmem is disabled.
>>>>
>>>> Introduce a new shrinker flag, SHRINKER_NONSLAB, for non-slab shrinkers,
>>>> i.e. the THP deferred split shrinker.  When memcg kmem is disabled, only
>>>> such shrinkers can be called when shrinking memcg slab.
>>> Looks like it breaks bisectability. It has to be done before making the
>>> shrinker memcg-aware, doesn't it?
>> No, it doesn't break bisectability. But without this patch the THP shrinker
>> can only be called when kmem charging is enabled.
> So, if kmem is disabled, it will not be called, right? Then it is a
> regression in my opinion. This patch has to go in before 2/4.

I don't think this is a regression. A "regression" should mean something 
that used to work but is now broken. Actually, the deferred split shrinker 
has never worked with memcg.

Anyway, either before 2/4 or after 2/4 looks ok.

>

Patch

diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 9443caf..e14f68e 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -69,10 +69,8 @@  struct shrinker {
 
 	/* These are for internal use */
 	struct list_head list;
-#ifdef CONFIG_MEMCG_KMEM
 	/* ID in shrinker_idr */
 	int id;
-#endif
 	/* objs pending delete, per node */
 	atomic_long_t *nr_deferred;
 };
@@ -81,6 +79,7 @@  struct shrinker {
 /* Flags */
 #define SHRINKER_NUMA_AWARE	(1 << 0)
 #define SHRINKER_MEMCG_AWARE	(1 << 1)
+#define SHRINKER_NONSLAB	(1 << 2)
 
 extern int prealloc_shrinker(struct shrinker *shrinker);
 extern void register_shrinker_prepared(struct shrinker *shrinker);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 50f4720..e77a9fc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2913,7 +2913,8 @@  static unsigned long deferred_split_scan(struct shrinker *shrink,
 	.count_objects = deferred_split_count,
 	.scan_objects = deferred_split_scan,
 	.seeks = DEFAULT_SEEKS,
-	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
+	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
+		 SHRINKER_NONSLAB,
 };
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7acd0af..62000ae 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -174,8 +174,6 @@  struct scan_control {
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
-#ifdef CONFIG_MEMCG_KMEM
-
 /*
  * We allow subsystems to populate their shrinker-related
  * LRU lists before register_shrinker_prepared() is called
@@ -227,16 +225,6 @@  static void unregister_memcg_shrinker(struct shrinker *shrinker)
 	idr_remove(&shrinker_idr, id);
 	up_write(&shrinker_rwsem);
 }
-#else /* CONFIG_MEMCG_KMEM */
-static int prealloc_memcg_shrinker(struct shrinker *shrinker)
-{
-	return 0;
-}
-
-static void unregister_memcg_shrinker(struct shrinker *shrinker)
-{
-}
-#endif /* CONFIG_MEMCG_KMEM */
 
 #ifdef CONFIG_MEMCG
 static bool global_reclaim(struct scan_control *sc)
@@ -579,7 +567,6 @@  static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	return freed;
 }
 
-#ifdef CONFIG_MEMCG_KMEM
 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 			struct mem_cgroup *memcg, int priority)
 {
@@ -587,7 +574,7 @@  static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 	unsigned long ret, freed = 0;
 	int i;
 
-	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
+	if (!mem_cgroup_online(memcg))
 		return 0;
 
 	if (!down_read_trylock(&shrinker_rwsem))
@@ -613,6 +600,11 @@  static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 			continue;
 		}
 
+		/* Call non-slab shrinkers even though kmem is disabled */
+		if (!memcg_kmem_enabled() &&
+		    !(shrinker->flags & SHRINKER_NONSLAB))
+			continue;
+
 		ret = do_shrink_slab(&sc, shrinker, priority);
 		if (ret == SHRINK_EMPTY) {
 			clear_bit(i, map->map);
@@ -649,13 +641,6 @@  static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 	up_read(&shrinker_rwsem);
 	return freed;
 }
-#else /* CONFIG_MEMCG_KMEM */
-static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
-			struct mem_cgroup *memcg, int priority)
-{
-	return 0;
-}
-#endif /* CONFIG_MEMCG_KMEM */
 
 /**
  * shrink_slab - shrink slab caches