@@ -31,6 +31,9 @@ struct shrink_control {
/* current memcg being shrunk (for memcg aware shrinkers) */
struct mem_cgroup *memcg;
+
+ /* derived from scan_control: true if under memcg low reclaim */
+ bool memcg_low_reclaim;
};
#define SHRINK_STOP (~0UL)
@@ -625,10 +625,9 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
/**
* shrink_slab - shrink slab caches
- * @gfp_mask: allocation context
- * @nid: node whose slab caches to target
* @memcg: memory cgroup whose slab caches to target
- * @priority: the reclaim priority
+ * @sc: scan_control struct for this reclaim session
+ * @nid: node whose slab caches to target
*
* Call the shrink functions to age shrinkable caches.
*
@@ -638,15 +637,18 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
* @memcg specifies the memory cgroup to target. Unaware shrinkers
* are called only if it is the root cgroup.
*
- * @priority is sc->priority, we take the number of objects and >> by priority
- * in order to get the scan target.
+ * @sc is the scan_control struct, we take the number of objects
+ * and >> by sc->priority in order to get the scan target.
*
* Returns the number of reclaimed slab objects.
*/
-static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
- struct mem_cgroup *memcg,
- int priority)
+static unsigned long shrink_slab(struct mem_cgroup *memcg,
+ struct scan_control *sc,
+ int nid)
{
+ bool memcg_low_reclaim = sc->memcg_low_reclaim;
+ gfp_t gfp_mask = sc->gfp_mask;
+ int priority = sc->priority;
unsigned long ret, freed = 0;
struct shrinker *shrinker;
@@ -668,6 +670,7 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
.gfp_mask = gfp_mask,
.nid = nid,
.memcg = memcg,
+ .memcg_low_reclaim = memcg_low_reclaim,
};
ret = do_shrink_slab(&sc, shrinker, priority);
@@ -694,6 +697,9 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
void drop_slab_node(int nid)
{
unsigned long freed;
+ struct scan_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ };
do {
struct mem_cgroup *memcg = NULL;
@@ -701,7 +707,7 @@ void drop_slab_node(int nid)
freed = 0;
memcg = mem_cgroup_iter(NULL, NULL, NULL);
do {
- freed += shrink_slab(GFP_KERNEL, nid, memcg, 0);
+ freed += shrink_slab(memcg, &sc, nid);
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);
} while (freed > 10);
}
@@ -2673,8 +2679,7 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
shrink_lruvec(lruvec, sc);
- shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
- sc->priority);
+ shrink_slab(memcg, sc, pgdat->node_id);
/* Record the group's reclaim efficiency */
vmpressure(sc->gfp_mask, memcg, false,
A new member memcg_low_reclaim is introduced in shrink_control struct, which is derived from scan_control struct, in order to tell the shrinker whether the reclaim session is under memcg low reclaim or not. The followup patch will use this new member. Cc: Dave Chinner <dchinner@redhat.com> Signed-off-by: Yafang Shao <laoar.shao@gmail.com> --- include/linux/shrinker.h | 3 +++ mm/vmscan.c | 27 ++++++++++++++++----------- 2 files changed, 19 insertions(+), 11 deletions(-)