@@ -12,9 +12,11 @@ struct bpf_mem_alloc {
struct bpf_mem_caches __percpu *caches;
struct bpf_mem_cache __percpu *cache;
struct work_struct work;
+ void (*ctor)(struct bpf_mem_alloc *ma, void *obj);
};
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu,
+ void (*ctor)(struct bpf_mem_alloc *, void *));
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
/* kmalloc/kfree equivalent: */
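The callback receives both the allocator and the new object, so a user that embeds struct bpf_mem_alloc in a larger structure can get back to its own state with container_of(). A minimal sketch of the intended calling convention; my_map, my_elem and my_elem_ctor are illustrative names, not part of this patch:

struct my_elem {
	u64 generation;
};

struct my_map {
	struct bpf_mem_alloc ma;
	u64 initial_gen;
};

static void my_elem_ctor(struct bpf_mem_alloc *ma, void *obj)
{
	/* The ctor only sees the embedded allocator; container_of()
	 * recovers the structure that owns it.
	 */
	struct my_map *map = container_of(ma, struct my_map, ma);
	struct my_elem *elem = obj;

	/* One-time setup of the freshly kmalloc-ed object. */
	elem->generation = map->initial_gen;
}

static int my_map_init(struct my_map *map)
{
	/* Non-percpu cache of my_elem objects; the allocator will invoke
	 * my_elem_ctor() on every object it pulls from the slab.
	 */
	return bpf_mem_alloc_init(&map->ma, sizeof(struct my_elem), false,
				  my_elem_ctor);
}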
@@ -2755,7 +2755,7 @@ static int __init bpf_global_ma_init(void)
{
int ret;
- ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
+ ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false, NULL);
bpf_global_ma_set = !ret;
return ret;
}
@@ -453,6 +453,15 @@ static int htab_map_alloc_check(union bpf_attr *attr)
return 0;
}
+static void htab_elem_ctor(struct bpf_mem_alloc *ma, void *obj)
+{
+ struct bpf_htab *htab = container_of(ma, struct bpf_htab, ma);
+ struct htab_elem *elem = obj;
+
+ check_and_init_map_value(&htab->map,
+ elem->key + round_up(htab->map.key_size, 8));
+}
+
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
@@ -565,12 +574,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_prealloc;
}
} else {
- err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
+ err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false,
+ htab_elem_ctor);
if (err)
goto free_map_locked;
if (percpu) {
err = bpf_mem_alloc_init(&htab->pcpu_ma,
- round_up(htab->map.value_size, 8), true);
+ round_up(htab->map.value_size, 8), true, NULL);
if (err)
goto free_map_locked;
}
@@ -1004,8 +1014,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
l_new = ERR_PTR(-ENOMEM);
goto dec_count;
}
- check_and_init_map_value(&htab->map,
- l_new->key + round_up(key_size, 8));
}
memcpy(l_new->key, key, key_size);
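The explicit check_and_init_map_value() after a successful bpf_mem_cache_alloc() is dropped because htab_elem_ctor() now performs the same initialization when the allocator first carves the element out of the slab, so repeating it on every allocation becomes redundant.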
@@ -98,6 +98,7 @@ struct bpf_mem_cache {
int free_cnt;
int low_watermark, high_watermark, batch;
int percpu_size;
+ struct bpf_mem_alloc *ma;
struct rcu_head rcu;
struct llist_head free_by_rcu;
@@ -188,6 +189,9 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
obj = __alloc(c, node);
if (!obj)
break;
+ /* Only initialize newly allocated objects */
+ if (c->ma->ctor)
+ c->ma->ctor(c->ma, obj);
}
if (IS_ENABLED(CONFIG_PREEMPT_RT))
/* In RT irq_work runs in per-cpu kthread, so disable
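Note where the hook sits: right after __alloc() in alloc_bulk(), so the callback fires once per object obtained from the slab and is not run again when the cache hands out a recycled unit. A condensed, illustrative view of that flow, restating the hunk above rather than adding anything new:

	obj = __alloc(c, node);			/* fresh slab object        */
	if (!obj)
		break;
	if (c->ma->ctor)
		c->ma->ctor(c->ma, obj);	/* constructed exactly once */
	/* ... */
	__llist_add(obj, &c->free_llist);	/* parked for unit_alloc()  */

unit_alloc() and unit_free() then hand the same object out and take it back without invoking the ctor again, so whatever the ctor establishes must either stay valid across reuse or be re-established by the consumer.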
@@ -374,7 +378,8 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
* kmalloc/kfree. Max allocation size is 4096 in this case.
* This is bpf_dynptr and bpf_kptr use case.
*/
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu,
+ void (*ctor)(struct bpf_mem_alloc *, void *))
{
static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
struct bpf_mem_caches *cc, __percpu *pcc;
@@ -382,6 +387,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
struct obj_cgroup *objcg = NULL;
int cpu, i, unit_size, percpu_size = 0;
+ ma->ctor = ctor;
if (size) {
pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
if (!pc)
@@ -402,6 +408,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c->unit_size = unit_size;
c->objcg = objcg;
c->percpu_size = percpu_size;
+ c->ma = ma;
prefill_mem_cache(c, cpu);
}
ma->cache = pc;
@@ -424,6 +431,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c = &cc->cache[i];
c->unit_size = sizes[i];
c->objcg = objcg;
+ c->ma = ma;
prefill_mem_cache(c, cpu);
}
}