@@ -376,6 +376,7 @@ enum event_type_t {
/* see ctx_resched() for details */
EVENT_CPU = 0x8,
EVENT_CGROUP = 0x10,
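+ /* flag bits below qualify how scheduling is done; they never name a real event type */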
+ EVENT_FLAGS = EVENT_CGROUP,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
@@ -699,23 +700,32 @@ do { \
___p; \
})
-static void perf_ctx_disable(struct perf_event_context *ctx, bool cgroup)
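+/*
+ * Return true when this PMU context can be skipped for the requested
+ * scheduling operation: with EVENT_CGROUP set, PMUs carrying no cgroup
+ * events are left untouched. An event_type of 0 filters nothing, so the
+ * caller acts on every PMU in the context.
+ */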
+static bool perf_skip_pmu_ctx(struct perf_event_pmu_context *pmu_ctx,
+ enum event_type_t event_type)
+{
+ if ((event_type & EVENT_CGROUP) && !pmu_ctx->nr_cgroups)
+ return true;
+
+ return false;
+}
+
+static void perf_ctx_disable(struct perf_event_context *ctx, enum event_type_t event_type)
{
struct perf_event_pmu_context *pmu_ctx;
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
- if (cgroup && !pmu_ctx->nr_cgroups)
+ if (perf_skip_pmu_ctx(pmu_ctx, event_type))
continue;
perf_pmu_disable(pmu_ctx->pmu);
}
}
-static void perf_ctx_enable(struct perf_event_context *ctx, bool cgroup)
+static void perf_ctx_enable(struct perf_event_context *ctx, enum event_type_t event_type)
{
struct perf_event_pmu_context *pmu_ctx;
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
- if (cgroup && !pmu_ctx->nr_cgroups)
+ if (perf_skip_pmu_ctx(pmu_ctx, event_type))
continue;
perf_pmu_enable(pmu_ctx->pmu);
}
@@ -877,7 +887,7 @@ static void perf_cgroup_switch(struct task_struct *task)
return;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
- perf_ctx_disable(&cpuctx->ctx, true);
+ perf_ctx_disable(&cpuctx->ctx, EVENT_CGROUP);
ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
/*
@@ -893,7 +903,7 @@ static void perf_cgroup_switch(struct task_struct *task)
*/
ctx_sched_in(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
- perf_ctx_enable(&cpuctx->ctx, true);
+ perf_ctx_enable(&cpuctx->ctx, EVENT_CGROUP);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
}
@@ -2732,9 +2742,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
event_type &= EVENT_ALL;
- perf_ctx_disable(&cpuctx->ctx, false);
+ perf_ctx_disable(&cpuctx->ctx, 0);
if (task_ctx) {
- perf_ctx_disable(task_ctx, false);
+ perf_ctx_disable(task_ctx, 0);
task_ctx_sched_out(task_ctx, event_type);
}
@@ -2752,9 +2762,9 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
perf_event_sched_in(cpuctx, task_ctx);
- perf_ctx_enable(&cpuctx->ctx, false);
+ perf_ctx_enable(&cpuctx->ctx, 0);
if (task_ctx)
- perf_ctx_enable(task_ctx, false);
+ perf_ctx_enable(task_ctx, 0);
}
void perf_pmu_resched(struct pmu *pmu)
@@ -3299,9 +3309,6 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
struct perf_event_pmu_context *pmu_ctx;
int is_active = ctx->is_active;
- bool cgroup = event_type & EVENT_CGROUP;
-
- event_type &= ~EVENT_CGROUP;
lockdep_assert_held(&ctx->lock);
@@ -3336,7 +3343,7 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
barrier();
}
- ctx->is_active &= ~event_type;
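+ /* EVENT_FLAGS bits (currently EVENT_CGROUP) are never tracked in ->is_active */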
+ ctx->is_active &= ~(event_type & ~EVENT_FLAGS);
if (!(ctx->is_active & EVENT_ALL))
ctx->is_active = 0;
@@ -3349,7 +3356,7 @@ ctx_sched_out(struct perf_event_context *ctx, enum event_type_t event_type)
is_active ^= ctx->is_active; /* changed bits */
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
- if (cgroup && !pmu_ctx->nr_cgroups)
+ if (perf_skip_pmu_ctx(pmu_ctx, event_type))
continue;
__pmu_ctx_sched_out(pmu_ctx, is_active);
}
@@ -3543,7 +3550,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
- perf_ctx_disable(ctx, false);
+ perf_ctx_disable(ctx, 0);
/* PMIs are disabled; ctx->nr_pending is stable. */
if (local_read(&ctx->nr_pending) ||
@@ -3563,7 +3570,7 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
perf_ctx_sched_task_cb(ctx, false);
perf_event_swap_task_ctx_data(ctx, next_ctx);
- perf_ctx_enable(ctx, false);
+ perf_ctx_enable(ctx, 0);
/*
* RCU_INIT_POINTER here is safe because we've not
@@ -3587,13 +3594,13 @@ perf_event_context_sched_out(struct task_struct *task, struct task_struct *next)
if (do_switch) {
raw_spin_lock(&ctx->lock);
- perf_ctx_disable(ctx, false);
+ perf_ctx_disable(ctx, 0);
inside_switch:
perf_ctx_sched_task_cb(ctx, false);
task_ctx_sched_out(ctx, EVENT_ALL);
- perf_ctx_enable(ctx, false);
+ perf_ctx_enable(ctx, 0);
raw_spin_unlock(&ctx->lock);
}
}
@@ -3890,12 +3897,12 @@ static void pmu_groups_sched_in(struct perf_event_context *ctx,
static void ctx_groups_sched_in(struct perf_event_context *ctx,
struct perf_event_groups *groups,
- bool cgroup)
+ enum event_type_t event_type)
{
struct perf_event_pmu_context *pmu_ctx;
list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
- if (cgroup && !pmu_ctx->nr_cgroups)
+ if (perf_skip_pmu_ctx(pmu_ctx, event_type))
continue;
pmu_groups_sched_in(ctx, groups, pmu_ctx->pmu);
}
@@ -3912,9 +3919,6 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
int is_active = ctx->is_active;
- bool cgroup = event_type & EVENT_CGROUP;
-
- event_type &= ~EVENT_CGROUP;
lockdep_assert_held(&ctx->lock);
@@ -3932,7 +3936,7 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
barrier();
}
- ctx->is_active |= (event_type | EVENT_TIME);
+ ctx->is_active |= ((event_type & ~EVENT_FLAGS) | EVENT_TIME);
if (ctx->task) {
if (!is_active)
cpuctx->task_ctx = ctx;
@@ -3947,11 +3951,11 @@ ctx_sched_in(struct perf_event_context *ctx, enum event_type_t event_type)
* in order to give them the best chance of going on.
*/
if (is_active & EVENT_PINNED)
- ctx_groups_sched_in(ctx, &ctx->pinned_groups, cgroup);
+ ctx_groups_sched_in(ctx, &ctx->pinned_groups, event_type);
/* Then walk through the lower prio flexible groups */
if (is_active & EVENT_FLEXIBLE)
- ctx_groups_sched_in(ctx, &ctx->flexible_groups, cgroup);
+ ctx_groups_sched_in(ctx, &ctx->flexible_groups, event_type);
}
static void perf_event_context_sched_in(struct task_struct *task)
@@ -3966,11 +3970,11 @@ static void perf_event_context_sched_in(struct task_struct *task)
if (cpuctx->task_ctx == ctx) {
perf_ctx_lock(cpuctx, ctx);
- perf_ctx_disable(ctx, false);
+ perf_ctx_disable(ctx, 0);
perf_ctx_sched_task_cb(ctx, true);
- perf_ctx_enable(ctx, false);
+ perf_ctx_enable(ctx, 0);
perf_ctx_unlock(cpuctx, ctx);
goto rcu_unlock;
}
@@ -3983,7 +3987,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
if (!ctx->nr_events)
goto unlock;
- perf_ctx_disable(ctx, false);
+ perf_ctx_disable(ctx, 0);
/*
* We want to keep the following priority order:
* cpu pinned (that don't need to move), task pinned,
@@ -3993,7 +3997,7 @@ static void perf_event_context_sched_in(struct task_struct *task)
* events, no need to flip the cpuctx's events around.
*/
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree)) {
- perf_ctx_disable(&cpuctx->ctx, false);
+ perf_ctx_disable(&cpuctx->ctx, 0);
ctx_sched_out(&cpuctx->ctx, EVENT_FLEXIBLE);
}
@@ -4002,9 +4006,9 @@ static void perf_event_context_sched_in(struct task_struct *task)
perf_ctx_sched_task_cb(cpuctx->task_ctx, true);
if (!RB_EMPTY_ROOT(&ctx->pinned_groups.tree))
- perf_ctx_enable(&cpuctx->ctx, false);
+ perf_ctx_enable(&cpuctx->ctx, 0);
- perf_ctx_enable(ctx, false);
+ perf_ctx_enable(ctx, 0);
unlock:
perf_ctx_unlock(cpuctx, ctx);