Message ID | 20230807110936.21819-20-zhengqi.arch@bytedance.com (mailing list archive) |
---|---|
State | New, archived |
Series | use refcount+RCU method to implement lockless slab shrink |
On Mon, Aug 7, 2023 at 7:17 AM Qi Zheng <zhengqi.arch@bytedance.com> wrote:
>
> Use new APIs to dynamically allocate the rcu-kfree shrinker.
>
> Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>

For RCU:

Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>

thanks,

 - Joel

> ---
>  kernel/rcu/tree.c | 22 +++++++++++++---------
>  1 file changed, 13 insertions(+), 9 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 7c79480bfaa0..3b20fc46c514 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -3449,13 +3449,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
>         return freed == 0 ? SHRINK_STOP : freed;
>  }
>
> -static struct shrinker kfree_rcu_shrinker = {
> -       .count_objects = kfree_rcu_shrink_count,
> -       .scan_objects = kfree_rcu_shrink_scan,
> -       .batch = 0,
> -       .seeks = DEFAULT_SEEKS,
> -};
> -
>  void __init kfree_rcu_scheduler_running(void)
>  {
>         int cpu;
> @@ -4931,6 +4924,7 @@ static void __init kfree_rcu_batch_init(void)
>  {
>         int cpu;
>         int i, j;
> +       struct shrinker *kfree_rcu_shrinker;
>
>         /* Clamp it to [0:100] seconds interval. */
>         if (rcu_delay_page_cache_fill_msec < 0 ||
> @@ -4962,8 +4956,18 @@ static void __init kfree_rcu_batch_init(void)
>                 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
>                 krcp->initialized = true;
>         }
> -       if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
> -               pr_err("Failed to register kfree_rcu() shrinker!\n");
> +
> +       kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree");
> +       if (!kfree_rcu_shrinker) {
> +               pr_err("Failed to allocate kfree_rcu() shrinker!\n");
> +               return;
> +       }
> +
> +       kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count;
> +       kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan;
> +       kfree_rcu_shrinker->seeks = DEFAULT_SEEKS;
> +
> +       shrinker_register(kfree_rcu_shrinker);
>  }
>
>  void __init rcu_init(void)
> --
> 2.30.2
>
> On Aug 7, 2023, at 19:09, Qi Zheng <zhengqi.arch@bytedance.com> wrote:
>
> Use new APIs to dynamically allocate the rcu-kfree shrinker.
>
> Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>

Reviewed-by: Muchun Song <songmuchun@bytedance.com>
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 7c79480bfaa0..3b20fc46c514 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3449,13 +3449,6 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
         return freed == 0 ? SHRINK_STOP : freed;
 }
 
-static struct shrinker kfree_rcu_shrinker = {
-        .count_objects = kfree_rcu_shrink_count,
-        .scan_objects = kfree_rcu_shrink_scan,
-        .batch = 0,
-        .seeks = DEFAULT_SEEKS,
-};
-
 void __init kfree_rcu_scheduler_running(void)
 {
         int cpu;
@@ -4931,6 +4924,7 @@ static void __init kfree_rcu_batch_init(void)
 {
         int cpu;
         int i, j;
+        struct shrinker *kfree_rcu_shrinker;
 
         /* Clamp it to [0:100] seconds interval. */
         if (rcu_delay_page_cache_fill_msec < 0 ||
@@ -4962,8 +4956,18 @@ static void __init kfree_rcu_batch_init(void)
                 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
                 krcp->initialized = true;
         }
-        if (register_shrinker(&kfree_rcu_shrinker, "rcu-kfree"))
-                pr_err("Failed to register kfree_rcu() shrinker!\n");
+
+        kfree_rcu_shrinker = shrinker_alloc(0, "rcu-kfree");
+        if (!kfree_rcu_shrinker) {
+                pr_err("Failed to allocate kfree_rcu() shrinker!\n");
+                return;
+        }
+
+        kfree_rcu_shrinker->count_objects = kfree_rcu_shrink_count;
+        kfree_rcu_shrinker->scan_objects = kfree_rcu_shrink_scan;
+        kfree_rcu_shrinker->seeks = DEFAULT_SEEKS;
+
+        shrinker_register(kfree_rcu_shrinker);
 }
 
 void __init rcu_init(void)
Use new APIs to dynamically allocate the rcu-kfree shrinker.

Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
---
 kernel/rcu/tree.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
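
For readers skimming the series, the diff above reduces to a single conversion pattern: drop the statically defined struct shrinker and its register_shrinker() call, and switch to a dynamically allocated shrinker. The sketch below illustrates that pattern for a hypothetical subsystem, assuming only the shrinker_alloc()/shrinker_register() interfaces introduced earlier in this series; the my_* names and the trivial callbacks are placeholders, not part of the patch.

/*
 * Sketch of the conversion pattern applied by this patch, for a
 * hypothetical subsystem. Only shrinker_alloc()/shrinker_register()
 * from this series are assumed; the my_* names are placeholders.
 */
#include <linux/shrinker.h>

static unsigned long my_shrink_count(struct shrinker *shrink,
                                     struct shrink_control *sc)
{
        /* Report how many objects could be reclaimed right now. */
        return 0;
}

static unsigned long my_shrink_scan(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        /* Try to free up to sc->nr_to_scan objects; nothing to do here. */
        return SHRINK_STOP;
}

static int __init my_subsystem_init(void)
{
        struct shrinker *s;

        /* Allocate the shrinker; 0 means no SHRINKER_* flags. */
        s = shrinker_alloc(0, "my-subsystem");
        if (!s)
                return -ENOMEM;

        s->count_objects = my_shrink_count;
        s->scan_objects  = my_shrink_scan;
        s->seeks         = DEFAULT_SEEKS;

        /* Only now does reclaim start invoking the callbacks. */
        shrinker_register(s);
        return 0;
}

As in the rcu-kfree case above, registration is deliberately the last step so reclaim never sees a half-initialized shrinker, and the old register_shrinker() error path becomes a plain allocation-failure check.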