@@ -212,6 +212,15 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 	if (i >= cnt)
 		return;
 
+	for (; i < cnt; i++) {
+		obj = llist_del_first(&c->waiting_for_gp_ttrace);
+		if (!obj)
+			break;
+		add_obj_to_free_list(c, obj);
+	}
+	if (i >= cnt)
+		return;
+
 	memcg = get_memcg(c);
 	old_memcg = set_active_memcg(memcg);
 	for (; i < cnt; i++) {
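The first hunk lets alloc_bulk() pull objects back off waiting_for_gp_ttrace with llist_del_first() before it falls back to fresh, memcg-accounted allocation. Below is a rough userspace sketch of that reuse-before-allocate order; the list type and helper names (pop, push, refill) are hypothetical stand-ins, not the kernel code.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the kernel's llist: a plain singly linked stack. */
struct node { struct node *next; };

static struct node *pop(struct node **head)
{
	struct node *n = *head;

	if (n)
		*head = n->next;
	return n;
}

static void push(struct node **head, struct node *n)
{
	n->next = *head;
	*head = n;
}

/* Refill up to 'cnt' objects into 'free_list', mirroring the order the
 * hunk establishes: reuse parked objects first, allocate the remainder.
 */
static int refill(struct node **pending, struct node **free_list, int cnt)
{
	int i;

	for (i = 0; i < cnt; i++) {
		struct node *obj = pop(pending);

		if (!obj)
			break;
		push(free_list, obj);
	}
	for (; i < cnt; i++) {
		struct node *obj = malloc(sizeof(*obj));

		if (!obj)
			break;
		push(free_list, obj);
	}
	return i;
}

int main(void)
{
	struct node parked[2], *pending = NULL, *free_list = NULL;

	push(&pending, &parked[0]);
	push(&pending, &parked[1]);
	/* Two objects are reused, two come from malloc() (leaked here for brevity). */
	printf("refilled %d objects\n", refill(&pending, &free_list, 4));
	return 0;
}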
@@ -290,12 +299,7 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 
 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
-		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
-		 * It doesn't race with llist_del_all either.
-		 * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
-		 * from __free_rcu() and from drain_mem_cache().
-		 */
-		__llist_add(llnode, &c->waiting_for_gp_ttrace);
+		llist_add(llnode, &c->waiting_for_gp_ttrace);
 
 	if (unlikely(READ_ONCE(c->draining))) {
 		__free_rcu(&c->rcu_ttrace);
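With the first hunk in place, alloc_bulk() may run llist_del_first() on waiting_for_gp_ttrace while this loop repopulates it, so the exclusive-access assumption spelled out in the deleted comment no longer holds; the non-atomic __llist_add() is therefore replaced by the cmpxchg-based llist_add(). The following is a rough userspace model of the difference, assuming simplified llist semantics (single del_first consumer, ABA ignored) and using C11 atomics rather than the kernel's primitives.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct lnode { struct lnode *next; };
struct lhead { _Atomic(struct lnode *) first; };

static bool model___llist_add(struct lnode *n, struct lhead *h)
{
	struct lnode *first = atomic_load_explicit(&h->first, memory_order_relaxed);

	/* Plain read-modify-write: only valid with exclusive access to the list. */
	n->next = first;
	atomic_store_explicit(&h->first, n, memory_order_release);
	return first == NULL;
}

static bool model_llist_add(struct lnode *n, struct lhead *h)
{
	struct lnode *first = atomic_load_explicit(&h->first, memory_order_relaxed);

	/* cmpxchg loop: a concurrent consumer popping entries cannot make the
	 * producer lose the node it is linking in.
	 */
	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak_explicit(&h->first, &first, n,
							memory_order_release,
							memory_order_relaxed));
	return first == NULL;
}

static struct lnode *model_llist_del_first(struct lhead *h)
{
	struct lnode *first = atomic_load_explicit(&h->first, memory_order_acquire);

	/* Single-consumer pop; the real llist_del_first() has the same rule. */
	while (first &&
	       !atomic_compare_exchange_weak_explicit(&h->first, &first,
						      first->next,
						      memory_order_acquire,
						      memory_order_acquire))
		;
	return first;
}

int main(void)
{
	struct lhead head = { NULL };
	struct lnode a, b;

	model___llist_add(&a, &head);	/* fine: no concurrency yet */
	model_llist_add(&b, &head);	/* safe even with a concurrent pop */
	return model_llist_del_first(&head) == &b ? 0 : 1;
}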