--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2761,11 +2761,11 @@ static bool perf_hw_regs(struct pt_regs *regs)
void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
- struct perf_guest_info_callbacks *guest_cbs = this_cpu_read(perf_guest_cbs);
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
struct unwind_state state;
unsigned long addr;
- if (guest_cbs && guest_cbs->is_in_guest()) {
+ if (guest_cbs) {
/* TODO: We don't support guest os callchain now */
return;
}
@@ -2865,11 +2865,11 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
- struct perf_guest_info_callbacks *guest_cbs = this_cpu_read(perf_guest_cbs);
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
struct stack_frame frame;
const struct stack_frame __user *fp;
- if (guest_cbs && guest_cbs->is_in_guest()) {
+ if (guest_cbs) {
/* TODO: We don't support guest os callchain now */
return;
}
@@ -2946,9 +2946,9 @@ static unsigned long code_segment_base(struct pt_regs *regs)
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- struct perf_guest_info_callbacks *guest_cbs = this_cpu_read(perf_guest_cbs);
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
- if (guest_cbs && guest_cbs->is_in_guest())
+ if (guest_cbs)
return guest_cbs->get_guest_ip();
return regs->ip + code_segment_base(regs);
@@ -2956,10 +2956,10 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
unsigned long perf_misc_flags(struct pt_regs *regs)
{
- struct perf_guest_info_callbacks *guest_cbs = this_cpu_read(perf_guest_cbs);
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
int misc = 0;
- if (guest_cbs && guest_cbs->is_in_guest()) {
+ if (guest_cbs) {
if (guest_cbs->is_user_mode())
misc |= PERF_RECORD_MISC_GUEST_USER;
else
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2853,9 +2853,8 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
*/
if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
handled++;
- guest_cbs = this_cpu_read(perf_guest_cbs);
- if (unlikely(guest_cbs && guest_cbs->is_in_guest() &&
- guest_cbs->handle_intel_pt_intr))
+ guest_cbs = perf_get_guest_cbs();
+ if (unlikely(guest_cbs && guest_cbs->handle_intel_pt_intr))
guest_cbs->handle_intel_pt_intr();
else
intel_pt_interrupt();
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1241,6 +1241,23 @@ DECLARE_PER_CPU(struct perf_guest_info_callbacks *, perf_guest_cbs);
extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
extern void perf_unregister_guest_info_callbacks(void);
extern void perf_register_guest_info_callbacks_all_cpus(struct perf_guest_info_callbacks *cbs);
+/*
+ * Returns guest callbacks for the current CPU if callbacks are registered and
+ * the PMI fired while a guest was running, otherwise returns NULL.
+ */
+static inline struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
+{
+ struct perf_guest_info_callbacks *guest_cbs = this_cpu_read(perf_guest_cbs);
+
+ /*
+ * Implementing is_in_guest is optional if the callbacks are registered
+ * only when "in guest".
+ */
+ if (guest_cbs && (!guest_cbs->is_in_guest || guest_cbs->is_in_guest()))
+ return guest_cbs;
+
+ return NULL;
+}
#endif /* CONFIG_HAVE_GUEST_PERF_EVENTS */
extern void perf_event_exec(void);
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5461,12 +5461,6 @@ struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
}
#ifdef __KVM_WANT_PERF_CALLBACKS
-static int kvm_is_in_guest(void)
-{
- /* Registration of KVM's callback signifies "in guest". */
- return true;
-}
-
static int kvm_is_user_mode(void)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
@@ -5488,7 +5482,8 @@ static unsigned long kvm_get_guest_ip(void)
}
static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .is_in_guest = kvm_is_in_guest,
+ /* Registration of KVM's callback signifies "in guest". */
+ .is_in_guest = NULL,
.is_user_mode = kvm_is_user_mode,
.get_guest_ip = kvm_get_guest_ip,
.handle_intel_pt_intr = NULL,
Interpret a null ->is_in_guest callback as meaning "in guest" and use the
new semantics in KVM, which currently returns 'true' unconditionally in
its implementation of ->is_in_guest().  This avoids a retpoline on the
indirect call for PMIs that arrive in a KVM guest, and also provides a
handy excuse for a wrapper, perf_get_guest_cbs(), around retrieval of
perf_guest_cbs, e.g. to reduce the probability of an errant direct read
of perf_guest_cbs.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/events/core.c       | 16 ++++++++--------
 arch/x86/events/intel/core.c |  5 ++---
 include/linux/perf_event.h   | 17 +++++++++++++++++
 virt/kvm/kvm_main.c          |  9 ++-------
 4 files changed, 29 insertions(+), 18 deletions(-)
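
Purely as an illustrative aside, the new semantics can be modeled in plain
userspace C.  The sketch below is not kernel code: the struct, the "per-CPU"
pointer, and the values are simplified stand-ins that only mirror the names
used in this patch.  It shows that perf_get_guest_cbs() reports "in guest"
whenever callbacks are registered with a NULL ->is_in_guest, and "host" when
nothing is registered.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct perf_guest_info_callbacks. */
struct perf_guest_info_callbacks {
	int (*is_in_guest)(void);	/* optional under the new semantics */
	unsigned long (*get_guest_ip)(void);
};

/* Simplified stand-in for the per-CPU perf_guest_cbs pointer. */
static struct perf_guest_info_callbacks *perf_guest_cbs;

/*
 * Mirrors the new helper: a NULL ->is_in_guest is interpreted as
 * "registration implies in guest", so the indirect call is made only
 * when the callback is actually provided.
 */
static struct perf_guest_info_callbacks *perf_get_guest_cbs(void)
{
	struct perf_guest_info_callbacks *guest_cbs = perf_guest_cbs;

	if (guest_cbs && (!guest_cbs->is_in_guest || guest_cbs->is_in_guest()))
		return guest_cbs;

	return NULL;
}

/* Arbitrary value standing in for a guest RIP. */
static unsigned long fake_guest_ip(void)
{
	return 0xffffffff81000000UL;
}

/* KVM-style registration: no ->is_in_guest, registering means "in guest". */
static struct perf_guest_info_callbacks kvm_style_cbs = {
	.is_in_guest	= NULL,
	.get_guest_ip	= fake_guest_ip,
};

int main(void)
{
	struct perf_guest_info_callbacks *cbs;

	/* Nothing registered: the helper returns NULL, i.e. "not in guest". */
	cbs = perf_get_guest_cbs();
	printf("unregistered: %s\n", cbs ? "guest" : "host");

	/* Registered with a NULL ->is_in_guest: treated as "in guest". */
	perf_guest_cbs = &kvm_style_cbs;
	cbs = perf_get_guest_cbs();
	if (cbs)
		printf("registered:   guest ip = %#lx\n", cbs->get_guest_ip());

	return 0;
}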