Message ID | 20190409192217.23459-5-andrew.murray@arm.com (mailing list archive) |
---|---|
State | New, archived |
Series | arm64: Support perf event modifiers :G and :H |
On 04/09/2019 08:22 PM, Andrew Murray wrote:
> Add support for the :G and :H attributes in perf by handling the
> exclude_host/exclude_guest event attributes.
>
> We notify KVM of counters that we wish to be enabled or disabled on
> guest entry/exit and thus defer from starting or stopping events based
> on their event attributes.
>
> With !VHE we switch the counters between host/guest at EL2. We are able
> to eliminate counters counting host events on the boundaries of guest
> entry/exit when using :G by filtering out EL2 for exclude_host. When
> using !exclude_hv there is a small blackout window at the guest
> entry/exit where host events are not captured.
>
> Signed-off-by: Andrew Murray <andrew.murray@arm.com>
> Acked-by: Will Deacon <will.deacon@arm.com>
> ---
>  arch/arm64/kernel/perf_event.c | 43 ++++++++++++++++++++++++++++------
>  1 file changed, 36 insertions(+), 7 deletions(-)
>
> diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
> index cccf4fc86d67..6bb28aaf5aea 100644
> --- a/arch/arm64/kernel/perf_event.c
> +++ b/arch/arm64/kernel/perf_event.c
> @@ -26,6 +26,7 @@
>
>  #include <linux/acpi.h>
>  #include <linux/clocksource.h>
> +#include <linux/kvm_host.h>
>  #include <linux/of.h>
>  #include <linux/perf/arm_pmu.h>
>  #include <linux/platform_device.h>
> @@ -528,11 +529,21 @@ static inline int armv8pmu_enable_counter(int idx)
>
>  static inline void armv8pmu_enable_event_counter(struct perf_event *event)
>  {
> +	struct perf_event_attr *attr = &event->attr;
>  	int idx = event->hw.idx;
> +	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
>
> -	armv8pmu_enable_counter(idx);
>  	if (armv8pmu_event_is_chained(event))
> -		armv8pmu_enable_counter(idx - 1);
> +		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
> +
> +	kvm_set_pmu_events(counter_bits, attr);
> +
> +	/* We rely on the hypervisor switch code to enable guest counters */
> +	if (!kvm_pmu_counter_deferred(attr)) {
> +		armv8pmu_enable_counter(idx);
> +		if (armv8pmu_event_is_chained(event))
> +			armv8pmu_enable_counter(idx - 1);
> +	}
>  }
>
>  static inline int armv8pmu_disable_counter(int idx)
> @@ -545,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx)
>  static inline void armv8pmu_disable_event_counter(struct perf_event *event)
>  {
>  	struct hw_perf_event *hwc = &event->hw;
> +	struct perf_event_attr *attr = &event->attr;
>  	int idx = hwc->idx;
> +	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
>
>  	if (armv8pmu_event_is_chained(event))
> -		armv8pmu_disable_counter(idx - 1);
> -	armv8pmu_disable_counter(idx);
> +		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
> +
> +	kvm_clr_pmu_events(counter_bits);
> +
> +	/* We rely on the hypervisor switch code to disable guest counters */
> +	if (!kvm_pmu_counter_deferred(attr)) {
> +		if (armv8pmu_event_is_chained(event))
> +			armv8pmu_disable_counter(idx - 1);
> +		armv8pmu_disable_counter(idx);
> +	}
>  }
>
>  static inline int armv8pmu_enable_intens(int idx)
> @@ -829,11 +850,16 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
>  		if (!attr->exclude_kernel)
>  			config_base |= ARMV8_PMU_INCLUDE_EL2;
>  	} else {
> -		if (attr->exclude_kernel)
> -			config_base |= ARMV8_PMU_EXCLUDE_EL1;
> -		if (!attr->exclude_hv)
> +		if (!attr->exclude_hv && !attr->exclude_host)
>  			config_base |= ARMV8_PMU_INCLUDE_EL2;
>  	}
> +
> +	/*
> +	 * Filter out !VHE kernels and guest kernels
> +	 */
> +	if (attr->exclude_kernel)
> +		config_base |= ARMV8_PMU_EXCLUDE_EL1;
> +
>  	if (attr->exclude_user)
>  		config_base |= ARMV8_PMU_EXCLUDE_EL0;
>
> @@ -863,6 +889,9 @@ static void armv8pmu_reset(void *info)
>  		armv8pmu_disable_intens(idx);
>  	}
>
> +	/* Clear the counters we flip at guest entry/exit */
> +	kvm_clr_pmu_events(U32_MAX);
> +
>  	/*
>  	 * Initialize & Reset PMNC. Request overflow interrupt for
>  	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
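For readers following the series: with this change, an event opened with the perf :G modifier (exclude_host set, e.g. a guest-only cycles event) is no longer programmed directly by the PMU driver on a !VHE host; its counter mask is handed to KVM via kvm_set_pmu_events() and only switched on at guest entry. The helper that makes that decision, kvm_pmu_counter_deferred(), comes from the KVM side of the series and is not shown in this patch. A minimal sketch of the intended check follows; the helper name and the exact condition are assumptions for illustration, not the series' code.

#include <linux/perf_event.h>
#include <asm/virt.h>

/*
 * Sketch only: approximates the deferral test consumed by
 * armv8pmu_enable_event_counter()/armv8pmu_disable_event_counter() above.
 * The real helper is provided by the KVM patches in this series; the name
 * and exact condition here are assumptions.
 */
static inline bool pmu_counter_deferred_sketch(struct perf_event_attr *attr)
{
	/*
	 * On a !VHE host, counters that must not count host activity
	 * (:G, i.e. exclude_host) are left untouched here and are only
	 * enabled/disabled by the hypervisor switch code at the guest
	 * entry/exit boundary.
	 */
	return !has_vhe() && attr->exclude_host;
}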
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cccf4fc86d67..6bb28aaf5aea 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -26,6 +26,7 @@
 
 #include <linux/acpi.h>
 #include <linux/clocksource.h>
+#include <linux/kvm_host.h>
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
 #include <linux/platform_device.h>
@@ -528,11 +529,21 @@ static inline int armv8pmu_enable_counter(int idx)
 
 static inline void armv8pmu_enable_event_counter(struct perf_event *event)
 {
+	struct perf_event_attr *attr = &event->attr;
 	int idx = event->hw.idx;
+	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
-	armv8pmu_enable_counter(idx);
 	if (armv8pmu_event_is_chained(event))
-		armv8pmu_enable_counter(idx - 1);
+		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+	kvm_set_pmu_events(counter_bits, attr);
+
+	/* We rely on the hypervisor switch code to enable guest counters */
+	if (!kvm_pmu_counter_deferred(attr)) {
+		armv8pmu_enable_counter(idx);
+		if (armv8pmu_event_is_chained(event))
+			armv8pmu_enable_counter(idx - 1);
+	}
 }
 
 static inline int armv8pmu_disable_counter(int idx)
@@ -545,11 +556,21 @@ static inline int armv8pmu_disable_counter(int idx)
 static inline void armv8pmu_disable_event_counter(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
+	struct perf_event_attr *attr = &event->attr;
 	int idx = hwc->idx;
+	u32 counter_bits = BIT(ARMV8_IDX_TO_COUNTER(idx));
 
 	if (armv8pmu_event_is_chained(event))
-		armv8pmu_disable_counter(idx - 1);
-	armv8pmu_disable_counter(idx);
+		counter_bits |= BIT(ARMV8_IDX_TO_COUNTER(idx - 1));
+
+	kvm_clr_pmu_events(counter_bits);
+
+	/* We rely on the hypervisor switch code to disable guest counters */
+	if (!kvm_pmu_counter_deferred(attr)) {
+		if (armv8pmu_event_is_chained(event))
+			armv8pmu_disable_counter(idx - 1);
+		armv8pmu_disable_counter(idx);
+	}
 }
 
 static inline int armv8pmu_enable_intens(int idx)
@@ -829,11 +850,16 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 		if (!attr->exclude_kernel)
 			config_base |= ARMV8_PMU_INCLUDE_EL2;
 	} else {
-		if (attr->exclude_kernel)
-			config_base |= ARMV8_PMU_EXCLUDE_EL1;
-		if (!attr->exclude_hv)
+		if (!attr->exclude_hv && !attr->exclude_host)
 			config_base |= ARMV8_PMU_INCLUDE_EL2;
 	}
+
+	/*
+	 * Filter out !VHE kernels and guest kernels
+	 */
+	if (attr->exclude_kernel)
+		config_base |= ARMV8_PMU_EXCLUDE_EL1;
+
 	if (attr->exclude_user)
 		config_base |= ARMV8_PMU_EXCLUDE_EL0;
 
@@ -863,6 +889,9 @@ static void armv8pmu_reset(void *info)
 		armv8pmu_disable_intens(idx);
 	}
 
+	/* Clear the counters we flip at guest entry/exit */
+	kvm_clr_pmu_events(U32_MAX);
+
 	/*
 	 * Initialize & Reset PMNC. Request overflow interrupt for
 	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
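The bitmasks recorded via kvm_set_pmu_events()/kvm_clr_pmu_events() are only half of the picture: the KVM world-switch code added elsewhere in the series applies them at EL2 on guest entry and exit. A rough sketch of that mechanism is shown below; the structure and function names are assumptions for illustration rather than the series' actual switch code.

#include <linux/types.h>
#include <asm/sysreg.h>

/*
 * Sketch only: how the deferred counter masks could be applied at the
 * guest boundary on a !VHE host. Names are illustrative assumptions;
 * the real switch code lives in the KVM patches of this series.
 */
struct host_pmu_events_sketch {
	u32 events_host;	/* counters that must only count host (:H) */
	u32 events_guest;	/* counters that must only count guest (:G) */
};

static void pmu_switch_to_guest_sketch(struct host_pmu_events_sketch *pmu)
{
	/* Stop host-only counters, start guest-only counters. */
	write_sysreg(pmu->events_host, pmcntenclr_el0);
	write_sysreg(pmu->events_guest, pmcntenset_el0);
}

static void pmu_switch_to_host_sketch(struct host_pmu_events_sketch *pmu)
{
	/* Stop guest-only counters, re-enable host-only counters. */
	write_sysreg(pmu->events_guest, pmcntenclr_el0);
	write_sysreg(pmu->events_host, pmcntenset_el0);
}

Because host-only counters are stopped in the hypervisor shortly before entering the guest and restarted after exit, host events in that interval are lost; this is the small blackout window the commit message describes for the !exclude_hv case.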