diff --git a/security/apparmor/include/label.h b/security/apparmor/include/label.h
--- a/security/apparmor/include/label.h
+++ b/security/apparmor/include/label.h
@@ -121,12 +121,12 @@ struct label_it {
* @ent: set of profiles for label, actual size determined by @size
*/
struct aa_label {
- struct kref count;
+ struct percpu_ref count;
+ long flags;
+ struct aa_proxy *proxy;
struct rb_node node;
struct rcu_head rcu;
- struct aa_proxy *proxy;
__counted char *hname;
- long flags;
u32 secid;
int size;
struct aa_profile *vec[];
@@ -276,7 +276,7 @@ void __aa_labelset_update_subtree(struct aa_ns *ns);
void aa_label_destroy(struct aa_label *label);
void aa_label_free(struct aa_label *label);
-void aa_label_kref(struct kref *kref);
+void aa_label_percpu_ref(struct percpu_ref *ref);
bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
@@ -373,7 +373,7 @@ int aa_label_match(struct aa_profile *profile, struct aa_ruleset *rules,
*/
static inline struct aa_label *__aa_get_label(struct aa_label *l)
{
- if (l && kref_get_unless_zero(&l->count))
+ if (l && percpu_ref_tryget(&l->count))
return l;
return NULL;
@@ -382,7 +382,7 @@ static inline struct aa_label *__aa_get_label(struct aa_label *l)
static inline struct aa_label *aa_get_label(struct aa_label *l)
{
if (l)
- kref_get(&(l->count));
+ percpu_ref_get(&(l->count));
return l;
}
@@ -402,7 +402,7 @@ static inline struct aa_label *aa_get_label_rcu(struct aa_label __rcu **l)
rcu_read_lock();
do {
c = rcu_dereference(*l);
- } while (c && !kref_get_unless_zero(&c->count));
+ } while (c && !percpu_ref_tryget(&c->count));
rcu_read_unlock();
return c;
@@ -442,7 +442,7 @@ static inline struct aa_label *aa_get_newest_label(struct aa_label *l)
static inline void aa_put_label(struct aa_label *l)
{
if (l)
- kref_put(&l->count, aa_label_kref);
+ percpu_ref_put(&l->count);
}
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -329,7 +329,7 @@ static inline aa_state_t ANY_RULE_MEDIATES(struct list_head *head,
static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
{
if (p)
- kref_get(&(p->label.count));
+ percpu_ref_get(&(p->label.count));
return p;
}
@@ -343,7 +343,7 @@ static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
*/
static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
{
- if (p && kref_get_unless_zero(&p->label.count))
+ if (p && percpu_ref_tryget(&p->label.count))
return p;
return NULL;
@@ -363,7 +363,7 @@ static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
rcu_read_lock();
do {
c = rcu_dereference(*p);
- } while (c && !kref_get_unless_zero(&c->label.count));
+ } while (c && !percpu_ref_tryget(&c->label.count));
rcu_read_unlock();
return c;
@@ -376,7 +376,7 @@ static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
static inline void aa_put_profile(struct aa_profile *p)
{
if (p)
- kref_put(&p->label.count, aa_label_kref);
+ percpu_ref_put(&p->label.count);
}
static inline int AUDIT_MODE(struct aa_profile *profile)
diff --git a/security/apparmor/label.c b/security/apparmor/label.c
--- a/security/apparmor/label.c
+++ b/security/apparmor/label.c
@@ -336,6 +336,7 @@ void aa_label_destroy(struct aa_label *label)
rcu_assign_pointer(label->proxy->label, NULL);
aa_put_proxy(label->proxy);
}
+ percpu_ref_exit(&label->count);
aa_free_secid(label->secid);
label->proxy = (struct aa_proxy *) PROXY_POISON + 1;
@@ -369,9 +370,9 @@ static void label_free_rcu(struct rcu_head *head)
label_free_switch(label);
}
-void aa_label_kref(struct kref *kref)
+void aa_label_percpu_ref(struct percpu_ref *ref)
{
- struct aa_label *label = container_of(kref, struct aa_label, count);
+ struct aa_label *label = container_of(ref, struct aa_label, count);
struct aa_ns *ns = labels_ns(label);
if (!ns) {
@@ -408,7 +409,11 @@ bool aa_label_init(struct aa_label *label, int size, gfp_t gfp)
label->size = size; /* doesn't include null */
label->vec[size] = NULL; /* null terminate */
- kref_init(&label->count);
+ if (percpu_ref_init(&label->count, aa_label_percpu_ref, PERCPU_REF_INIT_ATOMIC, gfp)) {
+ aa_free_secid(label->secid);
+ return false;
+ }
+
RB_CLEAR_NODE(&label->node);
return true;

In preparation for using a percpu refcount for labels, replace the
label kref with a percpu refcount. The percpu ref is initialized in
atomic mode, since using percpu mode would require tracking ref kill
points. As the atomic counter now sits in a different cacheline,
rearrange some of the fields - flags, proxy - to optimize the fast
paths for unconfined labels.

Beyond the new requirement to clean up the percpu ref with
percpu_ref_exit() in the label destruction path, other potential
impacts of this patch are:

- Increased memory requirement (for the per-CPU counters) for each
  label.
- Displacement of aa_label struct members to a different cacheline,
  as the percpu ref takes the space of two pointers.
- Movement of the atomic counter outside of the cacheline of the
  aa_label struct.

Signed-off-by: Neeraj Upadhyay <Neeraj.Upadhyay@amd.com>
---
 security/apparmor/include/label.h  | 16 ++++++++--------
 security/apparmor/include/policy.h |  8 ++++----
 security/apparmor/label.c          | 11 ++++++++---
 3 files changed, 20 insertions(+), 15 deletions(-)
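
For reference, here is a minimal, self-contained sketch (not part of
the patch; "struct obj" and its helpers are hypothetical names) of the
kref-like lifecycle that PERCPU_REF_INIT_ATOMIC provides: the release
callback runs when the last reference is put, so no explicit
percpu_ref_kill() point needs to be tracked, whereas percpu mode would
need one.

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

/* Hypothetical example object -- not AppArmor code. */
struct obj {
	struct percpu_ref count;	/* two pointers, vs 4 bytes for a kref */
};

/* Called when the refcount drops to zero, like a kref release. */
static void obj_release(struct percpu_ref *ref)
{
	struct obj *o = container_of(ref, struct obj, count);

	percpu_ref_exit(&o->count);	/* free the per-CPU counters */
	kfree(o);
}

static struct obj *obj_alloc(gfp_t gfp)
{
	struct obj *o = kzalloc(sizeof(*o), gfp);

	if (!o)
		return NULL;
	/*
	 * Atomic mode from the start: gets and puts use one shared
	 * atomic counter.  In (default) percpu mode, puts cannot
	 * detect a zero count until percpu_ref_kill() is called at a
	 * known shutdown point, which labels do not have.
	 */
	if (percpu_ref_init(&o->count, obj_release,
			    PERCPU_REF_INIT_ATOMIC, gfp)) {
		kfree(o);
		return NULL;
	}
	return o;	/* refcount starts at 1 */
}

In this scheme percpu_ref_tryget() takes the place of
kref_get_unless_zero() in the RCU lookup loops above, failing once the
count has reached zero.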