@@ -13,9 +13,15 @@ struct bpf_mem_alloc {
struct bpf_mem_cache __percpu *cache;
struct work_struct work;
void (*ctor)(struct bpf_mem_alloc *ma, void *obj);
+ unsigned int flags;
};

-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu,
+/* flags for bpf_mem_alloc_init() */
+enum {
+ BPF_MA_PERCPU = 1,
+};
+
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags,
void (*ctor)(struct bpf_mem_alloc *, void *));
void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma);
@@ -2755,7 +2755,7 @@ static int __init bpf_global_ma_init(void)
{
int ret;

- ret = bpf_mem_alloc_init(&bpf_global_ma, 0, false, NULL);
+ ret = bpf_mem_alloc_init(&bpf_global_ma, 0, 0, NULL);
bpf_global_ma_set = !ret;
return ret;
}
@@ -574,13 +574,14 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_prealloc;
}
} else {
- err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false,
+ err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, 0,
htab_elem_ctor);
if (err)
goto free_map_locked;
if (percpu) {
err = bpf_mem_alloc_init(&htab->pcpu_ma,
- round_up(htab->map.value_size, 8), true, NULL);
+ round_up(htab->map.value_size, 8),
+ BPF_MA_PERCPU, NULL);
if (err)
goto free_map_locked;
}
@@ -383,7 +383,7 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
* kmalloc/kfree. Max allocation size is 4096 in this case.
* This is bpf_dynptr and bpf_kptr use case.
*/
-int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu,
+int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, unsigned int flags,
void (*ctor)(struct bpf_mem_alloc *, void *))
{
static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
@@ -391,7 +391,9 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu,
struct bpf_mem_cache *c, __percpu *pc;
struct obj_cgroup *objcg = NULL;
int cpu, i, unit_size, percpu_size = 0;
+ bool percpu = (flags & BPF_MA_PERCPU);

+ ma->flags = flags;
ma->ctor = ctor;
if (size) {
pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
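For reference, a minimal sketch (not part of the patch) of how call sites translate from the old bool parameter to the new flags argument; struct my_elem and value_size below are illustrative placeholders, not identifiers from the patch:

	struct bpf_mem_alloc ma;
	int err;

	/* old: bpf_mem_alloc_init(&ma, size, false, NULL) */
	err = bpf_mem_alloc_init(&ma, sizeof(struct my_elem), 0, NULL);

	/* old: bpf_mem_alloc_init(&ma, size, true, NULL) */
	err = bpf_mem_alloc_init(&ma, round_up(value_size, 8), BPF_MA_PERCPU, NULL);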