--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1640,7 +1640,7 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base, struct bpf_map *map);
bool bpf_map_write_active(const struct bpf_map *map);
-void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+int bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
int generic_map_lookup_batch(struct bpf_map *map,
const union bpf_attr *attr,
union bpf_attr __user *uattr);

--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -85,6 +85,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
bool bypass_spec_v1 = bpf_bypass_spec_v1();
u64 array_size, mask64;
struct bpf_array *array;
+ int err;
elem_size = round_up(attr->value_size, 8);
@@ -143,7 +144,16 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array->map.bypass_spec_v1 = bypass_spec_v1;
/* copy mandatory map attributes */
- bpf_map_init_from_attr(&array->map, attr);
+ err = bpf_map_init_from_attr(&array->map, attr);
+ if (err) {
+ /* for BPF_F_MMAPABLE, array is offset from the vmalloc'ed base; free the base, not array */
+ if (attr->map_flags & BPF_F_MMAPABLE)
+ bpf_map_area_free((void *)round_down((unsigned long)array, PAGE_SIZE), NULL);
+ else
+ bpf_map_area_free(array, NULL);
+ return ERR_PTR(err);
+ }
+
array->elem_size = elem_size;
if (percpu && bpf_array_alloc_percpu(array)) {

--- a/kernel/bpf/bloom_filter.c
+++ b/kernel/bpf/bloom_filter.c
@@ -93,6 +93,7 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
u32 bitset_bytes, bitset_mask, nr_hash_funcs, nr_bits;
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_bloom_filter *bloom;
+ int err;
if (!bpf_capable())
return ERR_PTR(-EPERM);
@@ -147,7 +148,11 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
if (!bloom)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&bloom->map, attr);
+ err = bpf_map_init_from_attr(&bloom->map, attr);
+ if (err) {
+ bpf_map_area_free(bloom, NULL);
+ return ERR_PTR(err);
+ }
bloom->nr_hash_funcs = nr_hash_funcs;
bloom->bitset_mask = bitset_mask;

--- a/kernel/bpf/bpf_local_storage.c
+++ b/kernel/bpf/bpf_local_storage.c
@@ -609,11 +609,16 @@ struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
struct bpf_local_storage_map *smap;
unsigned int i;
u32 nbuckets;
+ int err;
smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE, NULL);
if (!smap)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&smap->map, attr);
+ err = bpf_map_init_from_attr(&smap->map, attr);
+ if (err) {
+ bpf_map_area_free(smap, NULL);
+ return ERR_PTR(err);
+ }
nbuckets = roundup_pow_of_two(num_possible_cpus());
/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */

--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -598,6 +598,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
struct bpf_struct_ops_map *st_map;
const struct btf_type *t, *vt;
struct bpf_map *map;
+ int err;
if (!bpf_capable())
return ERR_PTR(-EPERM);
@@ -624,7 +625,11 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
st_map->st_ops = st_ops;
map = &st_map->map;
- bpf_map_init_from_attr(map, attr);
+ err = bpf_map_init_from_attr(map, attr);
+ if (err) {
+ bpf_map_area_free(st_map, NULL);
+ return ERR_PTR(err);
+ }
st_map->uvalue = bpf_map_area_alloc(vt->size, NUMA_NO_NODE, map);
st_map->links =

--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -101,7 +101,11 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
if (!cmap)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&cmap->map, attr);
+ err = bpf_map_init_from_attr(&cmap->map, attr);
+ if (err) {
+ bpf_map_area_free(cmap, NULL);
+ return ERR_PTR(err);
+ }
/* Pre-limit array size based on NR_CPUS, not final CPU check */
if (cmap->map.max_entries > NR_CPUS) {

--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -167,7 +167,11 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
if (!dtab)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&dtab->map, attr);
+ err = bpf_map_init_from_attr(&dtab->map, attr);
+ if (err) {
+ bpf_map_area_free(dtab, NULL);
+ return ERR_PTR(err);
+ }
err = dev_map_init_map(dtab, attr);
if (err) {

--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -509,7 +509,11 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (!htab)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&htab->map, attr);
+ err = bpf_map_init_from_attr(&htab->map, attr);
+ if (err) {
+ bpf_map_area_free(htab, NULL);
+ return ERR_PTR(err);
+ }
lockdep_register_key(&htab->lockdep_key);

--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -287,6 +287,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
__u32 max_value_size = BPF_LOCAL_STORAGE_MAX_VALUE_SIZE;
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_cgroup_storage_map *map;
+ int err;
/* percpu is bound by PCPU_MIN_UNIT_SIZE, non-percu
* is the same as other local storages.
@@ -318,7 +319,11 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
- bpf_map_init_from_attr(&map->map, attr);
+ err = bpf_map_init_from_attr(&map->map, attr);
+ if (err) {
+ bpf_map_area_free(map, NULL);
+ return ERR_PTR(err);
+ }
spin_lock_init(&map->lock);
map->root = RB_ROOT;

--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -543,6 +543,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
static struct bpf_map *trie_alloc(union bpf_attr *attr)
{
struct lpm_trie *trie;
+ int err;
if (!bpf_capable())
return ERR_PTR(-EPERM);
@@ -563,7 +564,12 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
- bpf_map_init_from_attr(&trie->map, attr);
+ err = bpf_map_init_from_attr(&trie->map, attr);
+ if (err) {
+ bpf_map_area_free(trie, NULL);
+ return ERR_PTR(err);
+ }
+
trie->data_size = attr->key_size -
offsetof(struct bpf_lpm_trie_key, data);
trie->max_prefixlen = trie->data_size * 8;

--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -376,7 +376,11 @@ struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
if (!offmap)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&offmap->map, attr);
+ err = bpf_map_init_from_attr(&offmap->map, attr);
+ if (err) {
+ bpf_map_area_free(offmap, NULL);
+ return ERR_PTR(err);
+ }
rtnl_lock();
down_write(&bpf_devs_lock);

--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -70,6 +70,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_queue_stack *qs;
u64 size, queue_size;
+ int err;
size = (u64) attr->max_entries + 1;
queue_size = sizeof(*qs) + size * attr->value_size;
@@ -78,7 +79,11 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
if (!qs)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&qs->map, attr);
+ err = bpf_map_init_from_attr(&qs->map, attr);
+ if (err) {
+ bpf_map_area_free(qs, NULL);
+ return ERR_PTR(err);
+ }
qs->size = size;

--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -150,6 +150,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
{
int numa_node = bpf_map_attr_numa_node(attr);
struct reuseport_array *array;
+ int err;
if (!bpf_capable())
return ERR_PTR(-EPERM);
@@ -160,7 +161,11 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
- bpf_map_init_from_attr(&array->map, attr);
+ err = bpf_map_init_from_attr(&array->map, attr);
+ if (err) {
+ bpf_map_area_free(array, NULL);
+ return ERR_PTR(err);
+ }
return &array->map;
}

--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -185,6 +185,7 @@ static struct bpf_ringbuf *bpf_ringbuf_alloc(size_t data_sz, int numa_node,
static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
{
struct bpf_ringbuf_map *rb_map;
+ int err;
if (attr->map_flags & ~RINGBUF_CREATE_FLAG_MASK)
return ERR_PTR(-EINVAL);
@@ -204,7 +205,11 @@ static struct bpf_map *ringbuf_map_alloc(union bpf_attr *attr)
if (!rb_map)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&rb_map->map, attr);
+ err = bpf_map_init_from_attr(&rb_map->map, attr);
+ if (err) {
+ bpf_map_area_free(rb_map, NULL);
+ return ERR_PTR(err);
+ }
rb_map->rb = bpf_ringbuf_alloc(attr->max_entries, rb_map->map.numa_node,
&rb_map->map);

--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -403,7 +403,7 @@ static u32 bpf_map_flags_retain_permanent(u32 flags)
return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY);
}
-void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+int bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
bpf_map_save_memcg(map);
map->map_type = attr->map_type;
@@ -413,6 +413,8 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
map->numa_node = bpf_map_attr_numa_node(attr);
map->map_extra = attr->map_extra;
+
+ return 0;
}
static int bpf_map_alloc_id(struct bpf_map *map)

--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -31,6 +31,7 @@ static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
{
struct bpf_stab *stab;
+ int err;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -45,7 +46,12 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (!stab)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&stab->map, attr);
+ err = bpf_map_init_from_attr(&stab->map, attr);
+ if (err) {
+ bpf_map_area_free(stab, NULL);
+ return ERR_PTR(err);
+ }
+
raw_spin_lock_init(&stab->lock);
stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *

--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -63,6 +63,7 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
struct xsk_map *m;
int numa_node;
u64 size;
+ int err;
if (!capable(CAP_NET_ADMIN))
return ERR_PTR(-EPERM);
@@ -79,7 +80,12 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
if (!m)
return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&m->map, attr);
+ err = bpf_map_init_from_attr(&m->map, attr);
+ if (err) {
+ bpf_map_area_free(m, NULL);
+ return ERR_PTR(err);
+ }
+
spin_lock_init(&m->lock);
return &m->map;

Add a return value to bpf_map_init_from_attr() to indicate whether it
initialized successfully. This is preparation for a follow-up patch.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 include/linux/bpf.h            |  2 +-
 kernel/bpf/arraymap.c          | 12 +++++++++++-
 kernel/bpf/bloom_filter.c      |  7 ++++++-
 kernel/bpf/bpf_local_storage.c |  7 ++++++-
 kernel/bpf/bpf_struct_ops.c    |  7 ++++++-
 kernel/bpf/cpumap.c            |  6 +++++-
 kernel/bpf/devmap.c            |  6 +++++-
 kernel/bpf/hashtab.c           |  6 +++++-
 kernel/bpf/local_storage.c     |  7 ++++++-
 kernel/bpf/lpm_trie.c          |  8 +++++++-
 kernel/bpf/offload.c           |  6 +++++-
 kernel/bpf/queue_stack_maps.c  |  7 ++++++-
 kernel/bpf/reuseport_array.c   |  7 ++++++-
 kernel/bpf/ringbuf.c           |  7 ++++++-
 kernel/bpf/syscall.c           |  4 +++-
 net/core/sock_map.c            |  8 +++++++-
 net/xdp/xskmap.c               |  8 +++++++-
 17 files changed, 98 insertions(+), 17 deletions(-)
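
A note on the new contract (not part of the patch proper): with this patch
alone, bpf_map_init_from_attr() still returns 0 unconditionally, so none of
the error branches added at the call sites are reachable yet; they only
become live once the follow-up makes one of the initialization steps
fallible. That follow-up is not shown here, but a minimal sketch of the
intended shape, assuming (hypothetically) that bpf_map_save_memcg() is
changed to return 0 or a negative errno, could look like:

int bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
	int err;

	/* Hypothetical fallible step; today bpf_map_save_memcg() returns void. */
	err = bpf_map_save_memcg(map);
	if (err)
		return err;

	map->map_type = attr->map_type;
	map->key_size = attr->key_size;
	map->value_size = attr->value_size;
	map->max_entries = attr->max_entries;
	map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags);
	map->numa_node = bpf_map_attr_numa_node(attr);
	map->map_extra = attr->map_extra;

	return 0;
}

Callers then simply propagate err after freeing the partially set up map
area, exactly as each hunk above does.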