@@ -1333,6 +1333,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 	if (sk != NULL) {
 		kmemcheck_annotate_bitfield(sk, flags);
 
+		cgroup_sk_alloc(&sk->sk_cgrp_data);
 		if (security_sk_alloc(sk, family, priority))
 			goto out_free;
 
@@ -1340,7 +1341,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 		if (!try_module_get(prot->owner))
 			goto out_free_sec;
 		sk_tx_queue_clear(sk);
-		cgroup_sk_alloc(&sk->sk_cgrp_data);
 	}
 
 	return sk;
@@ -1348,6 +1348,7 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
 out_free_sec:
 	security_sk_free(sk);
 out_free:
+	cgroup_sk_free(&sk->sk_cgrp_data);
 	if (slab != NULL)
 		kmem_cache_free(slab, sk);
 	else
@@ -1363,8 +1364,8 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
 	owner = prot->owner;
 	slab = prot->slab;
 
-	cgroup_sk_free(&sk->sk_cgrp_data);
 	security_sk_free(sk);
+	cgroup_sk_free(&sk->sk_cgrp_data);
 	if (slab != NULL)
 		kmem_cache_free(slab, sk);
 	else
This patch reorders the allocation calls in sk_prot_alloc() so that sock_cgroup_data is populated before security_sk_alloc() is called, and frees it on the error path now that it is allocated earlier. It likewise moves cgroup_sk_free() after security_sk_free() in sk_prot_free(), so that LSMs can examine the cgroup that a particular sk belongs to.

Signed-off-by: Sargun Dhillon <sargun@sargun.me>
---
 net/core/sock.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
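
For illustration, a minimal sketch of what an LSM could do in its sk_alloc_security hook once this ordering is in place: sk->sk_cgrp_data is already populated when the hook runs, so the owning cgroup can be read with sock_cgroup_ptr(). The LSM name, function name, and debug message below are hypothetical; the sketch assumes CONFIG_SOCK_CGROUP_DATA=y and the sk_alloc_security hook signature of this kernel version, and is not part of the patch.

#include <linux/cgroup.h>
#include <linux/lsm_hooks.h>
#include <linux/printk.h>
#include <net/sock.h>

/* Hypothetical hook: called from security_sk_alloc(), which with this
 * patch runs after cgroup_sk_alloc() has set up sk->sk_cgrp_data. */
static int example_sk_alloc_security(struct sock *sk, int family,
				     gfp_t priority)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	/* A real LSM could key its policy off the owning cgroup here. */
	pr_debug("example-lsm: sk %p (family %d) belongs to cgroup %p\n",
		 sk, family, cgrp);

	return 0;	/* 0 == allow the allocation */
}

static struct security_hook_list example_hooks[] = {
	LSM_HOOK_INIT(sk_alloc_security, example_sk_alloc_security),
};

Registration of example_hooks via security_add_hooks() is omitted here, since that function's signature differs across kernel versions.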