--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -12,6 +12,12 @@ struct bpf_mem_alloc {
struct bpf_mem_caches __percpu *caches;
struct bpf_mem_cache __percpu *cache;
struct work_struct work;
+ unsigned int flags;
+};
+
+/* flags for bpf_mem_alloc_init() */
+enum {
+ BPF_MA_PERCPU = 1U << 0,
};

/* 'size != 0' is for bpf_mem_alloc which manages fixed-size objects.
@@ -21,7 +27,7 @@ struct bpf_mem_alloc {
* Alloc and free are done with bpf_mem_{alloc,free}() and the size of
* the returned object is given by the size argument of bpf_mem_alloc().
*/
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags);
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);

/* kmalloc/kfree equivalent: */
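
For illustration, a minimal caller-side sketch of the reworked interface (not part of the patch; foo_elem, foo_ma, foo_pcpu_ma and foo_ma_init are invented names). Passing 0 keeps the old 'percpu = false' behaviour, while BPF_MA_PERCPU replaces 'percpu = true':

    struct foo_elem {               /* hypothetical fixed-size object */
            u64 key;
            u64 val;
    };

    static struct bpf_mem_alloc foo_ma;
    static struct bpf_mem_alloc foo_pcpu_ma;

    static int __init foo_ma_init(void)
    {
            int err;

            /* fixed-size, non-percpu cache: flags == 0 replaces 'false' */
            err = bpf_mem_alloc_init(&foo_ma, sizeof(struct foo_elem), 0);
            if (err)
                    return err;

            /* per-CPU variant: BPF_MA_PERCPU replaces 'true' */
            err = bpf_mem_alloc_init(&foo_pcpu_ma, sizeof(struct foo_elem),
                                     BPF_MA_PERCPU);
            if (err)
                    bpf_mem_alloc_destroy(&foo_ma);
            return err;
    }
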
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2762,7 +2762,7 @@ static int __init bpf_global_ma_init(void)
{
int ret;

- ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false);
+ ret = bpf_mem_alloc_init(&bpf_global_ma, 0, 0);
bpf_global_ma_set = !ret;
return ret;
}
--- a/kernel/bpf/cpumask.c
+++ b/kernel/bpf/cpumask.c
@@ -445,7 +445,7 @@ static int __init cpumask_kfunc_init(void)
},
};

- ret = bpf_mem_alloc_init(&bpf_cpumask_ma, sizeof(struct bpf_cpumask), false);
+ ret = bpf_mem_alloc_init(&bpf_cpumask_ma, sizeof(struct bpf_cpumask), 0);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &cpumask_kfunc_set);
ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &cpumask_kfunc_set);
return ret ?: register_btf_id_dtor_kfuncs(cpumask_dtors,
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -576,12 +576,13 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_prealloc;
}
} else {
- err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
+ err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, 0);
if (err)
goto free_map_locked;
if (percpu) {
err = bpf_mem_alloc_init(&htab->pcpu_ma,
- round_up(htab->map.value_size, 8), true);
+ round_up(htab->map.value_size, 8),
+ BPF_MA_PERCPU);
if (err)
goto free_map_locked;
}
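
For context, a condensed sketch of how these two allocators are consumed later in hashtab.c (simplified from alloc_htab_elem(), error paths trimmed; nothing here is changed by this patch): fixed-size elements come from htab->ma, per-CPU value storage from htab->pcpu_ma.

    l_new = bpf_mem_cache_alloc(&htab->ma);        /* fixed-size element */
    if (!l_new)
            return ERR_PTR(-ENOMEM);

    if (percpu) {
            pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);   /* per-CPU value */
            if (!pptr) {
                    bpf_mem_cache_free(&htab->ma, l_new);
                    return ERR_PTR(-ENOMEM);
            }
    }
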
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -98,6 +98,7 @@ struct bpf_mem_cache {
int free_cnt;
int low_watermark, high_watermark, batch;
int percpu_size;
+ unsigned int flags;

struct rcu_head rcu;
struct llist_head free_by_rcu;
@@ -377,13 +378,14 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
* kmalloc/kfree. Max allocation size is 4096 in this case.
* This is bpf_dynptr and bpf_kptr use case.
*/
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags)
{
static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
struct bpf_mem_caches *cc, __percpu *pcc;
struct bpf_mem_cache *c, __percpu *pc;
struct obj_cgroup *objcg = NULL;
int cpu, i, unit_size, percpu_size = 0;
+ bool percpu = (flags & BPF_MA_PERCPU);

if (size) {
pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
@@ -406,9 +408,11 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c->unit_size = unit_size;
c->objcg = objcg;
c->percpu_size = percpu_size;
+ c->flags = flags;
prefill_mem_cache(c, cpu);
}
ma->cache = pc;
+ ma->flags = flags;
return 0;
}

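The comment above bpf_mem_alloc_init() describes the size == 0 mode; a hedged sketch of that kmalloc/kfree-equivalent usage (illustrative only, using the global allocator set up in bpf_global_ma_init() above; the object size is passed per allocation and may be anything up to 4096 bytes):

    void *p = bpf_mem_alloc(&bpf_global_ma, 512);

    if (p)
            bpf_mem_free(&bpf_global_ma, p);
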
@@ -428,10 +432,12 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
c = &cc->cache[i];
c->unit_size = sizes[i];
c->objcg = objcg;
+ c->flags = flags;
prefill_mem_cache(c, cpu);
}
}
ma->caches = pcc;
+ ma->flags = flags;
return 0;
}
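
With the mode recorded in both bpf_mem_cache and bpf_mem_alloc, later code can branch on it directly instead of replumbing a bool through every call chain. A hypothetical helper (invented name, not part of the patch):

    /* hypothetical: report whether an allocator was created per-CPU */
    static inline bool bpf_ma_is_percpu(const struct bpf_mem_alloc *ma)
    {
            return ma->flags & BPF_MA_PERCPU;
    }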